CARLsim  6.1.0
CARLsim: a GPU-accelerated SNN simulator
snn_manager.cpp
1 /* * Copyright (c) 2016 Regents of the University of California. All rights reserved.
2 *
3 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions
5 * are met:
6 *
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 *
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * 3. The names of its contributors may not be used to endorse or promote
15 * products derived from this software without specific prior written
16 * permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
22 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
23 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
24 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
25 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
26 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
27 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 *
30 * *********************************************************************************************** *
31 * CARLsim
32 * created by: (MDR) Micah Richert, (JN) Jayram M. Nageswaran
33 * maintained by:
34 * (MA) Mike Avery <averym@uci.edu>
35 * (MB) Michael Beyeler <mbeyeler@uci.edu>,
36 * (KDC) Kristofor Carlson <kdcarlso@uci.edu>
37 * (TSC) Ting-Shuo Chou <tingshuc@uci.edu>
38 * (HK) Hirak J Kashyap <kashyaph@uci.edu>
39 *
40 * CARLsim v1.0: JM, MDR
41 * CARLsim v2.0/v2.1/v2.2: JM, MDR, MA, MB, KDC
42 * CARLsim3: MB, KDC, TSC
43 * CARLsim4: TSC, HK
44 * CARLsim5: HK, JX, KC
45 * CARLsim6: LN, JX, KC, KW
46 *
47 * CARLsim available from http://socsci.uci.edu/~jkrichma/CARLsim/
48 * Ver 12/31/2016
49 */
50 
51 #include <snn.h>
52 #include <sstream>
53 #include <algorithm>
54 
55 #include <connection_monitor.h>
56 #include <connection_monitor_core.h>
57 #include <spike_monitor.h>
58 #include <spike_monitor_core.h>
59 #include <group_monitor.h>
60 #include <group_monitor_core.h>
61 #include <neuron_monitor.h>
62 #include <neuron_monitor_core.h>
63 
64 #include <spike_buffer.h>
65 #include <error_code.h>
66 
67 // \FIXME what are the following for? why were they all the way at the bottom of this file?
68 
69 #define COMPACTION_ALIGNMENT_PRE 16
70 #define COMPACTION_ALIGNMENT_POST 0
71 
75 
76 
77 // TODO: consider moving unsafe computations out of constructor
78 SNN::SNN(const std::string& name, SimMode preferredSimMode, LoggerMode loggerMode, int randSeed)
79  : networkName_(name), preferredSimMode_(preferredSimMode), loggerMode_(loggerMode),
80  randSeed_(SNN::setRandSeed(randSeed)) // all of these are const
81 {
82  // move all unsafe operations out of constructor
83  SNNinit();
84 }
85 
86 // destructor
87 SNN::~SNN() {
88  if (!simulatorDeleted)
89  deleteObjects();
90 }
91 
95 
96 // make a connection of the given type from each neuron in grpId1 to neurons in grpId2
97 short int SNN::connect(int grpId1, int grpId2, const std::string& _type, float initWt, float maxWt, float prob,
98  uint8_t minDelay, uint8_t maxDelay, RadiusRF radius,
99  float _mulSynFast, float _mulSynSlow, bool synWtType) {
100  //const std::string& wtType
101  int retId=-1;
102  assert(grpId1 < numGroups);
103  assert(grpId2 < numGroups);
104  assert(minDelay <= maxDelay);
105  assert(!isPoissonGroup(grpId2));
106 
107  //* \deprecated Do these ramp thingies still work?
108 // bool useRandWts = (wtType.find("random") != std::string::npos);
109 // bool useRampDownWts = (wtType.find("ramp-down") != std::string::npos);
110 // bool useRampUpWts = (wtType.find("ramp-up") != std::string::npos);
111 // uint32_t connProp = SET_INITWTS_RANDOM(useRandWts)
112 // | SET_CONN_PRESENT(1)
113 // | SET_FIXED_PLASTIC(synWtType)
114 // | SET_INITWTS_RAMPUP(useRampUpWts)
115 // | SET_INITWTS_RAMPDOWN(useRampDownWts);
116  uint32_t connProp = SET_CONN_PRESENT(1) | SET_FIXED_PLASTIC(synWtType);
117 
118  Grid3D szPre = getGroupGrid3D(grpId1);
119  Grid3D szPost = getGroupGrid3D(grpId2);
120 
121  // initialize configuration of a connection
122  ConnectConfig connConfig;
123 
124  connConfig.grpSrc = grpId1;
125  connConfig.grpDest = grpId2;
126  connConfig.initWt = initWt;
127  connConfig.maxWt = maxWt;
128  connConfig.maxDelay = maxDelay;
129  connConfig.minDelay = minDelay;
130 // newInfo->radX = (radX<0) ? MAX(szPre.x,szPost.x) : radX; // <0 means full connectivity, so the
131 // newInfo->radY = (radY<0) ? MAX(szPre.y,szPost.y) : radY; // effective group size is Grid3D.x. Grab
132 // newInfo->radZ = (radZ<0) ? MAX(szPre.z,szPost.z) : radZ; // the larger of pre / post to connect all
133  connConfig.connRadius = radius;
134  connConfig.mulSynFast = _mulSynFast;
135  connConfig.mulSynSlow = _mulSynSlow;
136  connConfig.connProp = connProp;
137  connConfig.connProbability = prob;
138  connConfig.type = CONN_UNKNOWN;
139  connConfig.connectionMonitorId = -1;
140  connConfig.connId = -1;
141  connConfig.conn = NULL;
142  connConfig.numberOfConnections = 0;
143 
144  if ( _type.find("random") != std::string::npos) {
145  connConfig.type = CONN_RANDOM;
146  }
147  // (for 'random' connections, the expected number of synapses is prob * numPre * numPost)
148  else if ( _type.find("full-no-direct") != std::string::npos) {
149  connConfig.type = CONN_FULL_NO_DIRECT;
150  }
151  else if ( _type.find("full") != std::string::npos) {
152  connConfig.type = CONN_FULL;
153  }
154  else if ( _type.find("one-to-one") != std::string::npos) {
155  connConfig.type = CONN_ONE_TO_ONE;
156  } else if ( _type.find("gaussian") != std::string::npos) {
157  connConfig.type = CONN_GAUSSIAN;
158  } else {
159  KERNEL_ERROR("Invalid connection type (should be 'random', 'full', 'one-to-one', 'full-no-direct', or 'gaussian')");
161  }
162 
163  // assign connection id
164  assert(connConfig.connId == -1);
165  connConfig.connId = numConnections;
166 
167  KERNEL_DEBUG("CONNECT SETUP: connId=%d, mulFast=%f, mulSlow=%f", connConfig.connId, connConfig.mulSynFast, connConfig.mulSynSlow);
168 
169  // store the configuration of a connection
170  connectConfigMap[numConnections] = connConfig; // connConfig.connId == numConnections
171 
172  assert(numConnections < MAX_CONN_PER_SNN); // make sure we don't overflow connId
173  numConnections++;
174 
175  return (numConnections - 1);
176 }
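// --- Illustrative usage sketch (not part of the original file) ----------------------------------
// The string-based connect() above only registers a ConnectConfig and returns its connection id.
// A minimal sketch, assuming an SNN instance and two previously created group ids (the destination
// must not be a Poisson group, see the assert above); all numeric values are placeholders, and
// RadiusRF(-1) is assumed to mean "no spatial restriction" as in the commented-out radius handling.
static short int exampleConnectRandom(SNN& snn, int gSrc, int gDest) {
	return snn.connect(gSrc, gDest, "random",
	                   0.25f, 0.25f, 0.10f,  // initWt, maxWt, connection probability
	                   1, 20,                // minDelay, maxDelay (ms)
	                   RadiusRF(-1),         // receptive-field radius; <0 => full extent
	                   1.0f, 1.0f,           // _mulSynFast, _mulSynSlow
	                   false);               // synWtType: false ~ fixed weights (assumption)
}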
177 
178 // make custom connections from grpId1 to grpId2
179 short int SNN::connect(int grpId1, int grpId2, ConnectionGeneratorCore* conn, float _mulSynFast, float _mulSynSlow,
180  bool synWtType) {
181  int retId=-1;
182 
183  assert(grpId1 < numGroups);
184  assert(grpId2 < numGroups);
185 
186  // initialize the configuration of a connection
187  ConnectConfig connConfig;
188  STDPConfig stdpConfig;
189 
190  connConfig.grpSrc = grpId1;
191  connConfig.grpDest = grpId2;
192  connConfig.initWt = 0.0f;
193  connConfig.maxWt = 0.0f;
194  connConfig.maxDelay = MAX_SYN_DELAY;
195  connConfig.minDelay = 1;
196  connConfig.mulSynFast = _mulSynFast;
197  connConfig.mulSynSlow = _mulSynSlow;
198  connConfig.connProp = SET_CONN_PRESENT(1) | SET_FIXED_PLASTIC(synWtType);
199  connConfig.type = CONN_USER_DEFINED;
200  connConfig.conn = conn;
201  connConfig.connectionMonitorId = -1;
202  connConfig.connId = -1;
203  connConfig.numberOfConnections = 0;
204  connConfig.stdpConfig = stdpConfig;
205 
206 
207  // assign a connection id
208  assert(connConfig.connId == -1);
209  connConfig.connId = numConnections;
210 
211  // store the configuration of a connection
212  connectConfigMap[numConnections] = connConfig; // connConfig.connId == numConnections
213 
214  assert(numConnections < MAX_CONN_PER_SNN); // make sure we don't overflow connId
215  numConnections++;
216 
217  return (numConnections - 1);
218 }
219 
220 // make a compartmental connection between two groups
221 short int SNN::connectCompartments(int grpIdLower, int grpIdUpper) {
222  assert(grpIdLower >= 0 && grpIdLower < numGroups);
223  assert(grpIdUpper >= 0 && grpIdUpper < numGroups);
224  assert(grpIdLower != grpIdUpper);
225  assert(!isPoissonGroup(grpIdLower));
226  assert(!isPoissonGroup(grpIdUpper));
227 
228  // the two groups must be located on the same partition
229  assert(groupConfigMap[grpIdLower].preferredNetId == groupConfigMap[grpIdUpper].preferredNetId);
230 
231  // this flag must be set if any compartmental connections exist
232  // note that grpId.withCompartments is not necessarily set just yet, this will be done in
233  // SNN::setCompartmentParameters
234  sim_with_compartments = true;
235 
236  compConnectConfig compConnConfig;
237 
238  compConnConfig.grpSrc = grpIdLower;
239  compConnConfig.grpDest = grpIdUpper;
240  compConnConfig.connId = -1;
241 
242  // assign a connection id
243  assert(compConnConfig.connId == -1);
244  compConnConfig.connId = numCompartmentConnections;
245 
246  // store the configuration of a connection
247  compConnectConfigMap[numCompartmentConnections] = compConnConfig;
248 
249  numCompartmentConnections++;
250 
251  return (numCompartmentConnections - 1);
252 }
253 
254 // create group of Izhikevich neurons
255 // use int for nNeur to avoid arithmetic underflow
256 int SNN::createGroup(const std::string& grpName, const Grid3D& grid, int neurType, int preferredPartition, ComputingBackend preferredBackend) {
257  assert(grid.numX * grid.numY * grid.numZ > 0);
258  assert(neurType >= 0);
259  assert(numGroups < MAX_GRP_PER_SNN);
260 
261  if ( (!(neurType & TARGET_AMPA) && !(neurType & TARGET_NMDA) &&
262  !(neurType & TARGET_GABAa) && !(neurType & TARGET_GABAb)) || (neurType & POISSON_NEURON)) {
263  KERNEL_ERROR("Invalid type using createGroup... Cannot create poisson generators here.");
265  }
266 
267  // initialize group configuration
268  GroupConfig grpConfig;
269  GroupConfigMD grpConfigMD;
270 
271  //All groups are non-compartmental by default
272  grpConfig.withCompartments = false;
273 
274  // init parameters of neural group size and location
275  grpConfig.grpName = grpName;
276  grpConfig.type = neurType;
277  grpConfig.numN = grid.N;
278 
279  grpConfig.isSpikeGenerator = false;
280  grpConfig.grid = grid;
281  grpConfig.isLIF = false;
282 
283  grpConfig.WithSTDP = false;
284  grpConfig.WithDA_MOD = false;
285 #ifdef LN_AXON_PLAST
286  grpConfig.WithAxonPlast = true;
287  grpConfig.AxonPlast_TAU = 25;
288 #endif
289 
290  if (preferredPartition == ANY) {
291  grpConfig.preferredNetId = ANY;
292  } else if (preferredBackend == CPU_CORES) {
293  grpConfig.preferredNetId = preferredPartition + CPU_RUNTIME_BASE;
294  } else {
295  grpConfig.preferredNetId = preferredPartition + GPU_RUNTIME_BASE;
296  }
297 
298  // assign a global group id
299  grpConfigMD.gGrpId = numGroups;
300 
301  // store the configuration of a group
302  groupConfigMap[numGroups] = grpConfig; // numGroups == grpId
303  groupConfigMDMap[numGroups] = grpConfigMD;
304 
305  assert(numGroups < MAX_GRP_PER_SNN); // make sure we don't overflow the group count
306  numGroups++;
307 
308  return grpConfigMD.gGrpId;
309 }
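// --- Illustrative usage sketch (not part of the original file) ----------------------------------
// createGroup() only records a GroupConfig; the Izhikevich parameters themselves are supplied
// later via setNeuronParameters(). The type must carry at least one TARGET_* receptor flag and
// must not include POISSON_NEURON (see the check above). Grid size and backend are placeholders.
static int exampleCreateExcGroup(SNN& snn) {
	Grid3D grid(80, 1, 1);                     // 80 neurons on a 1D grid
	int neurType = TARGET_AMPA | TARGET_NMDA;  // excitatory target receptors
	return snn.createGroup("exc", grid, neurType, ANY, CPU_CORES);
}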
310 
311 // create group of LIF neurons
312 // use int for nNeur to avoid arithmetic underflow
313 int SNN::createGroupLIF(const std::string& grpName, const Grid3D& grid, int neurType, int preferredPartition, ComputingBackend preferredBackend) {
314  assert(grid.numX * grid.numY * grid.numZ > 0);
315  assert(neurType >= 0);
316  assert(numGroups < MAX_GRP_PER_SNN);
317 
318  if ( (!(neurType & TARGET_AMPA) && !(neurType & TARGET_NMDA) &&
319  !(neurType & TARGET_GABAa) && !(neurType & TARGET_GABAb)) || (neurType & POISSON_NEURON)) {
320  KERNEL_ERROR("Invalid type using createGroupLIF... Cannot create poisson generators here.");
322  }
323 
324  // initialize group configuration
325  GroupConfig grpConfig;
326  GroupConfigMD grpConfigMD;
327 
328  // init parameters of neural group size and location
329  grpConfig.grpName = grpName;
330  grpConfig.type = neurType;
331  grpConfig.numN = grid.N;
332 
333  grpConfig.isLIF = true;
334  grpConfig.isSpikeGenerator = false;
335  grpConfig.grid = grid;
336 
337  grpConfig.WithSTDP = false;
338  grpConfig.WithDA_MOD = false;
339 #ifdef LN_AXON_PLAST
340  grpConfig.WithAxonPlast = false;
341 #endif
342 
343  if (preferredPartition == ANY) {
344  grpConfig.preferredNetId = ANY;
345  } else if (preferredBackend == CPU_CORES) {
346  grpConfig.preferredNetId = preferredPartition + CPU_RUNTIME_BASE;
347  } else {
348  grpConfig.preferredNetId = preferredPartition + GPU_RUNTIME_BASE;
349  }
350 
351  // assign a global group id
352  grpConfigMD.gGrpId = numGroups;
353 
354  // store the configuration of a group
355  groupConfigMap[numGroups] = grpConfig; // numGroups == grpId
356  groupConfigMDMap[numGroups] = grpConfigMD;
357 
358  assert(numGroups < MAX_GRP_PER_SNN); // make sure we don't overflow the group count
359  numGroups++;
360 
361  return grpConfigMD.gGrpId;
362 }
363 
364 // create spike generator group
365 // use int for nNeur to avoid arithmetic underflow
366 int SNN::createSpikeGeneratorGroup(const std::string& grpName, const Grid3D& grid, int neurType, int preferredPartition, ComputingBackend preferredBackend) {
367  assert(grid.numX * grid.numY * grid.numZ > 0);
368  assert(neurType >= 0);
369  assert(numGroups < MAX_GRP_PER_SNN);
370 
371  // initialize group configuration
372  GroupConfig grpConfig;
373  GroupConfigMD grpConfigMD;
374 
375  //All groups are non-compartmental by default FIXME:IS THIS NECESSARY?
376  grpConfig.withCompartments = false;
377 
378  // init parameters of neural group size and location
379  grpConfig.grpName = grpName;
380  grpConfig.type = neurType | POISSON_NEURON;
381  grpConfig.numN = grid.N;
382  grpConfig.isSpikeGenerator = true;
383  grpConfig.grid = grid;
384  grpConfig.isLIF = false;
385 
386  grpConfig.WithSTDP = false;
387  grpConfig.WithDA_MOD = false;
388 #ifdef LN_AXON_PLAST
389  grpConfig.WithAxonPlast = false;
390 #endif
391 
392  if (preferredPartition == ANY) {
393  grpConfig.preferredNetId = ANY;
394  }
395  else if (preferredBackend == CPU_CORES) {
396  grpConfig.preferredNetId = preferredPartition + CPU_RUNTIME_BASE;
397  }
398  else {
399  grpConfig.preferredNetId = preferredPartition + GPU_RUNTIME_BASE;
400  }
401 
402  // assign a global group id
403  grpConfigMD.gGrpId = numGroups;
404 
405  // store the configuration of a group
406  groupConfigMap[numGroups] = grpConfig;
407  groupConfigMDMap[numGroups] = grpConfigMD;
408 
409  assert(numGroups < MAX_GRP_PER_SNN); // make sure we don't overflow the group count
410  numGroups++;
411  numSpikeGenGrps++;
412 
413  return grpConfigMD.gGrpId;
414 }
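// --- Illustrative usage sketch (not part of the original file) ----------------------------------
// Spike generator groups get POISSON_NEURON OR-ed into their type automatically (see above), so
// the caller only names the target receptors. Grid size and backend are placeholders.
static int exampleCreateInputGroup(SNN& snn) {
	return snn.createSpikeGeneratorGroup("input", Grid3D(16, 16, 1), TARGET_AMPA, ANY, CPU_CORES);
}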
415 
416 void SNN::setCompartmentParameters(int gGrpId, float couplingUp, float couplingDown) {
417  if (gGrpId == ALL) {
418  for (int grpId = 0; grpId<numGroups; grpId++) {
419  setCompartmentParameters(grpId, couplingUp, couplingDown);
420  }
421  }
422  else {
423  groupConfigMap[gGrpId].withCompartments = true;
424  groupConfigMap[gGrpId].compCouplingUp = couplingUp;
425  groupConfigMap[gGrpId].compCouplingDown = couplingDown;
426  glbNetworkConfig.numComp += groupConfigMap[gGrpId].numN;
427  }
428 }
429 
430 #define LN_I_CALC_TYPES__REQUIRED_FOR_BACKWARD_COMPAT
431 #define LN_I_CALC_TYPES__REQUIRED_FOR_NETWORK_LEVEL
432 // set conductance values for a simulation (custom values or disable conductances altogether)
433 void SNN::setConductances(bool isSet, int tdAMPA, int trNMDA, int tdNMDA, int tdGABAa, int trGABAb, int tdGABAb) {
434  if (isSet) {
435  assert(tdAMPA>0); assert(tdNMDA>0); assert(tdGABAa>0); assert(tdGABAb>0);
436  assert(trNMDA>=0); assert(trGABAb>=0); // 0 to disable rise times
437  assert(trNMDA!=tdNMDA); assert(trGABAb!=tdGABAb); // singularity
438  }
439 
440  // set conductances globally for all connections
441  sim_with_conductances |= isSet;
442  dAMPA = 1.0-1.0/tdAMPA;
443  dNMDA = 1.0-1.0/tdNMDA;
444  dGABAa = 1.0-1.0/tdGABAa;
445  dGABAb = 1.0-1.0/tdGABAb;
446 
447  if (trNMDA>0) {
448  // use rise time for NMDA
449  sim_with_NMDA_rise = true;
450  rNMDA = 1.0-1.0/trNMDA;
451 
452  // compute max conductance under this model to scale it back to 1
453  // otherwise the peak conductance will not be equal to the weight
454  double tmax = (-tdNMDA*trNMDA*log(1.0*trNMDA/tdNMDA))/(tdNMDA-trNMDA); // t at which cond will be max
455  sNMDA = 1.0/(exp(-tmax/tdNMDA)-exp(-tmax/trNMDA)); // scaling factor, 1 over max amplitude
456  //assert(!std::isinf<double>(tmax) && !std::isnan<double>(tmax) && tmax >= 0);
457  assert(!std::isinf(tmax) && !std::isnan(tmax) && tmax >= 0);
458  assert(!std::isinf(sNMDA) && !std::isnan(sNMDA) && sNMDA>0);
459  }
460 
461  if (trGABAb>0) {
462  // use rise time for GABAb
463  sim_with_GABAb_rise = true;
464  rGABAb = 1.0-1.0/trGABAb;
465 
466  // compute max conductance under this model to scale it back to 1
467  // otherwise the peak conductance will not be equal to the weight
468  double tmax = (-tdGABAb*trGABAb*log(1.0*trGABAb/tdGABAb))/(tdGABAb-trGABAb); // t at which cond will be max
469  sGABAb = 1.0/(exp(-tmax/tdGABAb)-exp(-tmax/trGABAb)); // scaling factor, 1 over max amplitude
470  assert(!std::isinf(tmax) && !std::isnan(tmax)); assert(!std::isinf(sGABAb) && !std::isnan(sGABAb) && sGABAb>0);
471  }
472 
473  if (sim_with_conductances) {
474  KERNEL_INFO("Running COBA mode:");
475  KERNEL_INFO(" - AMPA decay time = %5d ms", tdAMPA);
476  KERNEL_INFO(" - NMDA rise time %s = %5d ms", sim_with_NMDA_rise?" ":"(disabled)", trNMDA);
477  KERNEL_INFO(" - GABAa decay time = %5d ms", tdGABAa);
478  KERNEL_INFO(" - GABAb rise time %s = %5d ms", sim_with_GABAb_rise?" ":"(disabled)",trGABAb);
479  KERNEL_INFO(" - GABAb decay time = %5d ms", tdGABAb);
480  } else {
481  KERNEL_INFO("Running CUBA mode (all synaptic conductances disabled)");
482  }
483 }
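// --- Illustrative usage sketch (not part of the original file) ----------------------------------
// Each decay/rise time constant tau (in ms) is converted above into a per-ms factor 1 - 1/tau,
// and a rise time of 0 disables the corresponding rise dynamics. A minimal COBA setup; the time
// constants below are placeholders, not a recommendation.
static void exampleEnableCOBA(SNN& snn) {
	// tdAMPA=5, trNMDA=0 (rise disabled), tdNMDA=150, tdGABAa=6, trGABAb=0 (rise disabled), tdGABAb=150
	snn.setConductances(true, 5, 0, 150, 6, 0, 150);
}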
484 #ifdef LN_I_CALC_TYPES
485 // set conductance values for a group (custom values or disable conductances)
486 // LN2021
487 // control conductance values at group level
488 // the same interface design is used in order to maintain backward compatibility
489 // default CUBA => single group(s) opt in to COBA with specific parameters; the network still has to support COBA
490 // default COBA => single group(s) opt out with CUBA; the network already supports COBA
491 // default CUBA & NM-weighted => different context, not relevant here
492 void SNN::setConductances(int gGrpId, bool isSet, int tdAMPA, int trNMDA, int tdNMDA, int tdGABAa, int trGABAb, int tdGABAb) {
493 
494  if (isSet) {
495  assert(tdAMPA > 0); assert(tdNMDA > 0); assert(tdGABAa > 0); assert(tdGABAb > 0);
496  assert(trNMDA >= 0); assert(trGABAb >= 0); // 0 to disable rise times
497  assert(trNMDA != tdNMDA); assert(trGABAb != tdGABAb); // singularity
498  }
499 
500  // enabling conductances for any group also enables them at the network level
501  sim_with_conductances |= isSet;
502 
503  // note: a group may default to CUBA even though conductances are enabled globally;
504  // in that case the group's icalcType still has to be defined explicitly as COBA or CUBA.
505  // calling this method with isSet = false is the intended way to opt a group out (CUBA)
506  // while sim_with_conductances remains enabled at the network level
507 
508  // globalGroup ID
509  auto &groupConfig = groupConfigMap[gGrpId];
510  groupConfig.icalcType = isSet ? COBA : CUBA; // generic reused method, in CUBA the conductance params are 0
511 
512  auto &config = groupConfig.conductanceConfig;
513  config.dAMPA = 1.0f-1.0f/tdAMPA;
514  config.dNMDA = 1.0f-1.0f/tdNMDA;
515  config.dGABAa = 1.0f-1.0f/tdGABAa;
516  config.dGABAb = 1.0f-1.0f/tdGABAb;
517 
518  if (trNMDA > 0) {
519  // use rise time for NMDA
520  sim_with_NMDA_rise = true;
521  groupConfig.with_NMDA_rise = true;
522  config.rNMDA = 1.0f-1.0f/trNMDA;
523 
524  // compute max conductance under this model to scale it back to 1
525  // otherwise the peak conductance will not be equal to the weight
526  float tmax = (-tdNMDA * trNMDA * log(1.0f * trNMDA / tdNMDA)) / (tdNMDA - trNMDA); // t at which cond will be max
527  config.sNMDA = 1.0f / (exp(-tmax / tdNMDA) - exp(-tmax / trNMDA)); // scaling factor, 1 over max amplitude
528  assert(!std::isinf(tmax) && !std::isnan(tmax) && tmax >= 0.0f);
529  assert(!std::isinf(config.sNMDA) && !std::isnan(config.sNMDA) && config.sNMDA > 0.0f);
530  //assert(!std::isinf<float>(tmax) && !std::isnan<float>(tmax) && tmax >= 0.0f);
531  //assert(!std::isinf<float>(config.sNMDA) && !std::isnan<float>(config.sNMDA) && config.sNMDA > 0.0f);
532  }
533 
534  if (trGABAb > 0) {
535  // use rise time for GABAb
536  sim_with_GABAb_rise = true;
537  groupConfig.with_GABAb_rise = true;
538  config.rGABAb = 1.0f-1.0f/trGABAb;
539 
540  // compute max conductance under this model to scale it back to 1
541  // otherwise the peak conductance will not be equal to the weight
542  float tmax = (-tdGABAb * trGABAb * log(1.0f * trGABAb / tdGABAb)) / (tdGABAb - trGABAb); // t at which cond will be max
543  config.sGABAb = 1.0f / (exp(-tmax / tdGABAb) - exp(-tmax / trGABAb)); // scaling factor, 1 over max amplitude
544  assert(!std::isinf(tmax) && !std::isnan(tmax)); assert(!std::isinf(config.sGABAb) && !std::isnan(config.sGABAb) && config.sGABAb > 0.0f); // LN2021 fix gcc
545  //assert(!std::isinf<float>(tmax) && !std::isnan<float>(tmax)); assert(!std::isinf<float>(config.sGABAb) && !std::isnan<float>(config.sGABAb) && config.sGABAb > 0.0f); // obsolete
546  }
547 
548  if (isSet) {
549  KERNEL_INFO("Running group (G:%d) COBA mode:", gGrpId);
550  KERNEL_INFO(" - AMPA decay time = %5d ms", tdAMPA);
551  KERNEL_INFO(" - NMDA rise time %s = %5d ms", (trNMDA > 0) ? " " : "(disabled)", trNMDA);
552  KERNEL_INFO(" - GABAa decay time = %5d ms", tdGABAa);
553  KERNEL_INFO(" - GABAb rise time %s = %5d ms", (trGABAb > 0) ? " " : "(disabled)", trGABAb);
554  KERNEL_INFO(" - GABAb decay time = %5d ms", tdGABAb);
555  }
556  else {
557  KERNEL_INFO("Running group %d in CUBA mode (synaptic conductances disabled)", gGrpId);
558  }
559 }
560 
561 
562 void SNN::setNM4weighted(int gGrpId, IcalcType icalc, float wDA, float w5HT, float wACh, float wNE, float wNorm, float wBase) {
563 
564  // globalGroup ID
565  auto &groupConfig = groupConfigMap[gGrpId];
566 
567  groupConfig.icalcType = icalc;
568 
569  auto &w = groupConfig.nm4wConfig.w;
570 
571  w[NM_DA] = wDA;
572  w[NM_5HT] = w5HT;
573  w[NM_ACh] = wACh;
574  w[NM_NE] = wNE;
575  w[NM_NE+1] = wNorm;
576  w[NM_NE+2] = wBase;
577 
578  KERNEL_INFO("Running group (G:%d) IcalcType: %s", gGrpId, IcalcType_string[icalc]);
579  KERNEL_INFO(" - Weights (DA, 5HT, ACh, NE) = %.1f %.1f %.1f %.1f ", wDA, w5HT, wACh, wNE);
580  KERNEL_INFO(" - Normalization/Boost, Base = %.1f %.1f", wNorm, wBase);
581 
582 }
583 
584 
585 
586 
587 #endif
588 
589 // set homeostasis for group
590 void SNN::setHomeostasis(int gGrpId, bool isSet, float homeoScale, float avgTimeScale) {
591  if (gGrpId == ALL) { // shortcut for all groups
592  for(int grpId = 0; grpId < numGroups; grpId++) {
593  setHomeostasis(grpId, isSet, homeoScale, avgTimeScale);
594  }
595  } else {
596  // set homeostasis for a given group
597  sim_with_homeostasis |= isSet;
598  groupConfigMap[gGrpId].homeoConfig.WithHomeostasis = isSet;
599  groupConfigMap[gGrpId].homeoConfig.homeostasisScale = homeoScale;
600  groupConfigMap[gGrpId].homeoConfig.avgTimeScale = avgTimeScale;
601  groupConfigMap[gGrpId].homeoConfig.avgTimeScaleInv = 1.0f / avgTimeScale;
602  groupConfigMap[gGrpId].homeoConfig.avgTimeScaleDecay = (avgTimeScale * 1000.0f - 1.0f) / (avgTimeScale * 1000.0f);
603 
604  KERNEL_INFO("Homeostasis parameters %s for %d (%s):\thomeoScale: %f, avgTimeScale: %f",
605  isSet?"enabled":"disabled", gGrpId, groupConfigMap[gGrpId].grpName.c_str(), homeoScale, avgTimeScale);
606  }
607 }
608 
609 // set a homeostatic target firing rate (enforced through homeostatic synaptic scaling)
610 void SNN::setHomeoBaseFiringRate(int gGrpId, float baseFiring, float baseFiringSD) {
611  if (gGrpId == ALL) { // shortcut for all groups
612  for(int grpId = 0; grpId < numGroups; grpId++) {
613  setHomeoBaseFiringRate(grpId, baseFiring, baseFiringSD);
614  }
615  } else {
616  // set the homeostatic base firing rate for a given group
617  groupConfigMap[gGrpId].homeoConfig.baseFiring = baseFiring;
618  groupConfigMap[gGrpId].homeoConfig.baseFiringSD = baseFiringSD;
619 
620  KERNEL_INFO("Homeostatic base firing rate set for %d (%s):\tbaseFiring: %3.3f, baseFiringStd: %3.3f",
621  gGrpId, groupConfigMap[gGrpId].grpName.c_str(), baseFiring, baseFiringSD);
622  }
623 }
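// --- Illustrative usage sketch (not part of the original file) ----------------------------------
// Homeostatic synaptic scaling needs both calls: setHomeostasis() enables it and sets the scaling
// strength and averaging window, setHomeoBaseFiringRate() sets the target rate. Values below are
// placeholders.
static void exampleEnableHomeostasis(SNN& snn, int gGrpId) {
	snn.setHomeostasis(gGrpId, true, 0.1f, 10.0f);    // homeoScale, avgTimeScale (seconds)
	snn.setHomeoBaseFiringRate(gGrpId, 35.0f, 0.0f);  // target 35 Hz, no standard deviation
}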
624 
625 
626 void SNN::setIntegrationMethod(integrationMethod_t method, int numStepsPerMs) {
627  assert(numStepsPerMs >= 1 && numStepsPerMs <= 100);
628  glbNetworkConfig.simIntegrationMethod = method;
629  glbNetworkConfig.simNumStepsPerMs = numStepsPerMs;
630  glbNetworkConfig.timeStep = 1.0f / numStepsPerMs;
631 }
632 
633 // set Izhikevich parameters for group
634 void SNN::setNeuronParameters(int gGrpId, float izh_a, float izh_a_sd, float izh_b, float izh_b_sd,
635  float izh_c, float izh_c_sd, float izh_d, float izh_d_sd)
636 {
637  assert(gGrpId >= -1);
638  assert(izh_a_sd >= 0); assert(izh_b_sd >= 0); assert(izh_c_sd >= 0); assert(izh_d_sd >= 0);
639 
640  if (gGrpId == ALL) { // shortcut for all groups
641  for(int grpId = 0; grpId < numGroups; grpId++) {
642  setNeuronParameters(grpId, izh_a, izh_a_sd, izh_b, izh_b_sd, izh_c, izh_c_sd, izh_d, izh_d_sd);
643  }
644  } else {
645  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a = izh_a;
646  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a_sd = izh_a_sd;
647  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b = izh_b;
648  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b_sd = izh_b_sd;
649  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c = izh_c;
650  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c_sd = izh_c_sd;
651  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d = izh_d;
652  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d_sd = izh_d_sd;
653  groupConfigMap[gGrpId].withParamModel_9 = 0;
654  groupConfigMap[gGrpId].isLIF = 0;
655  }
656 }
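// --- Illustrative usage sketch (not part of the original file) ----------------------------------
// The 4-parameter variant configures the classic Izhikevich (a, b, c, d) model; each parameter
// takes an optional standard deviation for per-neuron jitter. Shown with the well-known
// regular-spiking values and a homogeneous group (all standard deviations 0).
static void exampleSetRSNeurons(SNN& snn, int gGrpId) {
	snn.setNeuronParameters(gGrpId, 0.02f, 0.0f, 0.2f, 0.0f, -65.0f, 0.0f, 8.0f, 0.0f);
}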
657 
658 // set (9) Izhikevich parameters for group
659 void SNN::setNeuronParameters(int gGrpId, float izh_C, float izh_C_sd, float izh_k, float izh_k_sd,
660  float izh_vr, float izh_vr_sd, float izh_vt, float izh_vt_sd,
661  float izh_a, float izh_a_sd, float izh_b, float izh_b_sd,
662  float izh_vpeak, float izh_vpeak_sd, float izh_c, float izh_c_sd,
663  float izh_d, float izh_d_sd)
664 {
665  assert(gGrpId >= -1);
666  assert(izh_C_sd >= 0); assert(izh_k_sd >= 0); assert(izh_vr_sd >= 0);
667  assert(izh_vt_sd >= 0); assert(izh_a_sd >= 0); assert(izh_b_sd >= 0); assert(izh_vpeak_sd >= 0);
668  assert(izh_c_sd >= 0); assert(izh_d_sd >= 0);
669 
670  if (gGrpId == ALL) { // shortcut for all groups
671  for (int grpId = 0; grpId<numGroups; grpId++) {
672  setNeuronParameters(grpId, izh_C, izh_C_sd, izh_k, izh_k_sd, izh_vr, izh_vr_sd, izh_vt, izh_vt_sd,
673  izh_a, izh_a_sd, izh_b, izh_b_sd, izh_vpeak, izh_vpeak_sd, izh_c, izh_c_sd,
674  izh_d, izh_d_sd);
675  }
676  }
677  else {
678  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a = izh_a;
679  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a_sd = izh_a_sd;
680  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b = izh_b;
681  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b_sd = izh_b_sd;
682  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c = izh_c;
683  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c_sd = izh_c_sd;
684  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d = izh_d;
685  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d_sd = izh_d_sd;
686  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_C = izh_C;
687  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_C_sd = izh_C_sd;
688  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_k = izh_k;
689  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_k_sd = izh_k_sd;
690  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vr = izh_vr;
691  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vr_sd = izh_vr_sd;
692  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vt = izh_vt;
693  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vt_sd = izh_vt_sd;
694  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vpeak = izh_vpeak;
695  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vpeak_sd = izh_vpeak_sd;
696  groupConfigMap[gGrpId].withParamModel_9 = 1;
697  groupConfigMap[gGrpId].isLIF = 0;
698  KERNEL_INFO("Set a nine parameter group!");
699  }
700 }
701 
702 
703 // set LIF parameters for the group
704 void SNN::setNeuronParametersLIF(int gGrpId, int tau_m, int tau_ref, float vTh, float vReset, double minRmem, double maxRmem)
705 {
706  assert(gGrpId >= -1);
707  assert(tau_m >= 0); assert(tau_ref >= 0); assert(vReset < vTh);
708  assert(minRmem >= 0.0f); assert(minRmem <= maxRmem);
709 
710  if (gGrpId == ALL) { // shortcut for all groups
711  for(int grpId = 0; grpId < numGroups; grpId++) {
712  setNeuronParametersLIF(grpId, tau_m, tau_ref, vTh, vReset, minRmem, maxRmem);
713  }
714  } else {
715  groupConfigMap[gGrpId].neuralDynamicsConfig.lif_tau_m = tau_m;
716  groupConfigMap[gGrpId].neuralDynamicsConfig.lif_tau_ref = tau_ref;
717  groupConfigMap[gGrpId].neuralDynamicsConfig.lif_vTh = vTh;
718  groupConfigMap[gGrpId].neuralDynamicsConfig.lif_vReset = vReset;
719  groupConfigMap[gGrpId].neuralDynamicsConfig.lif_minRmem = minRmem;
720  groupConfigMap[gGrpId].neuralDynamicsConfig.lif_maxRmem = maxRmem;
721  groupConfigMap[gGrpId].withParamModel_9 = 0;
722  groupConfigMap[gGrpId].isLIF = 1;
723  }
724 }
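// --- Illustrative usage sketch (not part of the original file) ----------------------------------
// LIF groups take a membrane time constant, refractory period, threshold/reset voltages, and a
// membrane-resistance range (minRmem == maxRmem yields a homogeneous group). Values below are
// placeholders (units as defined in the headers).
static void exampleSetLIFNeurons(SNN& snn, int gGrpId) {
	// tau_m=10 ms, tau_ref=2 ms, vTh=-50 mV, vReset=-65 mV, Rmem fixed at 10
	snn.setNeuronParametersLIF(gGrpId, 10, 2, -50.0f, -65.0f, 10.0, 10.0);
}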
725 
726 
727 void SNN::setNeuromodulator(int gGrpId,
728  float baseDP, float tauDP,
729  float base5HT, float tau5HT,
730  float baseACh, float tauACh,
731  float baseNE, float tauNE)
732 {
733  return setNeuromodulator(gGrpId,
734  baseDP, tauDP, 0.04f, true,
735  base5HT, tau5HT, 0.04f, true,
736  baseACh, tauACh, 0.04f, true,
737  baseNE, tauNE, 0.04f, true);
738 }
739 
740 void SNN::setNeuromodulator(int gGrpId,
741  float baseDP, float tauDP, float releaseDP, bool activeDP,
742  float base5HT, float tau5HT, float release5HT, bool active5HT,
743  float baseACh, float tauACh, float releaseACh, bool activeACh,
744  float baseNE, float tauNE, float releaseNE, bool activeNE)
745 {
746  assert(gGrpId >= -1);
747  assert(baseDP >= 0.0f); assert(base5HT >= 0.0f); assert(baseACh >= 0.0f); assert(baseNE >= 0.0f); // LN2021 relaxed to non-negative
748  assert(tauDP > 0); assert(tau5HT > 0); assert(tauACh > 0); assert(tauNE > 0);
749 
750  if (gGrpId == ALL) { // shortcut for all groups
751  for (int grpId = 0; grpId < numGroups; grpId++) {
752  setNeuromodulator(grpId,
753  baseDP, tauDP, releaseDP, activeDP,
754  base5HT, tau5HT, release5HT, active5HT,
755  baseACh, tauACh, releaseACh, activeACh,
756  baseNE, tauNE, releaseNE, activeNE);
757  }
758  } else {
759  groupConfigMap[gGrpId].neuromodulatorConfig.baseDP = baseDP;
760  groupConfigMap[gGrpId].neuromodulatorConfig.decayDP = 1.0f - (1.0f / tauDP);
761  groupConfigMap[gGrpId].neuromodulatorConfig.releaseDP = releaseDP;
762  groupConfigMap[gGrpId].neuromodulatorConfig.activeDP = activeDP;
763 
764  groupConfigMap[gGrpId].neuromodulatorConfig.base5HT = base5HT;
765  groupConfigMap[gGrpId].neuromodulatorConfig.decay5HT = 1.0f - (1.0f / tau5HT);
766  groupConfigMap[gGrpId].neuromodulatorConfig.release5HT = release5HT;
767  groupConfigMap[gGrpId].neuromodulatorConfig.active5HT = active5HT;
768 
769  groupConfigMap[gGrpId].neuromodulatorConfig.baseACh = baseACh;
770  groupConfigMap[gGrpId].neuromodulatorConfig.decayACh = 1.0f - (1.0f / tauACh);
771  groupConfigMap[gGrpId].neuromodulatorConfig.releaseACh = releaseACh;
772  groupConfigMap[gGrpId].neuromodulatorConfig.activeACh = activeACh;
773 
774  groupConfigMap[gGrpId].neuromodulatorConfig.baseNE = baseNE;
775  groupConfigMap[gGrpId].neuromodulatorConfig.decayNE = 1.0f - (1.0f / tauNE);
776  groupConfigMap[gGrpId].neuromodulatorConfig.releaseNE = releaseNE;
777  groupConfigMap[gGrpId].neuromodulatorConfig.activeNE = activeNE;
778  }
779 }
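// --- Illustrative usage sketch (not part of the original file) ----------------------------------
// The short overload above fills in a release amount of 0.04 and marks all four neuromodulators
// active; each tau is turned into a decay factor 1 - 1/tau. Baselines and time constants below
// are placeholders.
static void exampleSetNeuromodulators(SNN& snn, int gGrpId) {
	// baseline concentration 1.0 and a 100 ms decay time constant for DA, 5-HT, ACh and NE
	snn.setNeuromodulator(gGrpId, 1.0f, 100.0f, 1.0f, 100.0f, 1.0f, 100.0f, 1.0f, 100.0f);
}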
780 
781 // set ESTDP params
782 void SNN::setESTDP(int preGrpId, int postGrpId, bool isSet, STDPType type, STDPCurve curve, float alphaPlus, float tauPlus, float alphaMinus, float tauMinus, float gamma) {
783  assert(preGrpId >= -1);
784  assert(postGrpId >= -1);
785  assert(IS_EXCITATORY_TYPE(groupConfigMap[preGrpId].type) == true);
786 
787  if (isSet) {
788  assert(type!=UNKNOWN_STDP);
789  assert(tauPlus > 0.0f); assert(tauMinus > 0.0f); assert(gamma >= 0.0f);
790  }
791 
792  short int connId = getConnectId(preGrpId, postGrpId);
793  if (connId < 0) {
794  KERNEL_ERROR("No connection found from group %d(%s) to group %d(%s)", preGrpId, getGroupName(preGrpId).c_str(),
795  postGrpId, getGroupName(postGrpId).c_str());
797  }
798 
799  // set STDP for a given connection
800  // set params for STDP curve
801  connectConfigMap[connId].stdpConfig.ALPHA_PLUS_EXC = alphaPlus;
802  connectConfigMap[connId].stdpConfig.ALPHA_MINUS_EXC = alphaMinus;
803  connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_EXC = 1.0f / tauPlus;
804  connectConfigMap[connId].stdpConfig.TAU_MINUS_INV_EXC = 1.0f / tauMinus;
805  connectConfigMap[connId].stdpConfig.GAMMA = gamma;
806  connectConfigMap[connId].stdpConfig.KAPPA = (1 + exp(-gamma / tauPlus)) / (1 - exp(-gamma / tauPlus));
807  connectConfigMap[connId].stdpConfig.OMEGA = alphaPlus * (1 - connectConfigMap[connId].stdpConfig.KAPPA);
808  // set flags for STDP function
809  connectConfigMap[connId].stdpConfig.WithESTDPtype = type;
810  connectConfigMap[connId].stdpConfig.WithESTDPcurve = curve;
811  connectConfigMap[connId].stdpConfig.WithESTDP = isSet;
812  connectConfigMap[connId].stdpConfig.WithSTDP |= connectConfigMap[connId].stdpConfig.WithESTDP;
813  sim_with_stdp |= connectConfigMap[connId].stdpConfig.WithSTDP;
814 
815  groupConfigMap[postGrpId].WithSTDP |= connectConfigMap[connId].stdpConfig.WithSTDP;
816  groupConfigMap[postGrpId].WithDA_MOD |= (type == DA_MOD);
817 #ifdef LN_I_CALC_TYPES
818  groupConfigMap[postGrpId].WithPKA_PLC_MOD |= (type == PKA_PLC_MOD);
819 #endif
820 
821  KERNEL_INFO("E-STDP %s for %s(%d) to %s(%d)", isSet?"enabled":"disabled", groupConfigMap[preGrpId].grpName.c_str(), preGrpId,
822  groupConfigMap[postGrpId].grpName.c_str(), postGrpId);
823 }
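// --- Illustrative usage sketch (not part of the original file) ----------------------------------
// E-STDP is configured per pre/post group pair and stored on the connection linking them, so the
// two groups must already be connected (see getConnectId() above). Curve parameters below are
// placeholders; STANDARD is assumed to be the non-modulated STDPType (this file only references
// DA_MOD and PKA_PLC_MOD explicitly), and gamma is only used by the timing-based curve.
static void exampleEnableESTDP(SNN& snn, int gPre, int gPost) {
	// alphaPlus, tauPlus, alphaMinus, tauMinus, gamma
	snn.setESTDP(gPre, gPost, true, STANDARD, EXP_CURVE, 0.001f, 20.0f, -0.0012f, 20.0f, 0.0f);
}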
824 
825 #ifdef LN_I_CALC_TYPES
826 // set ESTDP params
827 void SNN::setESTDP(int preGrpId, int postGrpId, bool isSet, STDPType type, STDPCurve curve, float alphaPlus, float tauPlus, float alphaMinus, float tauMinus, int nm_pka, float w_pka, int nm_plc, float w_plc)
828 {
829  assert(preGrpId >= -1);
830  assert(postGrpId >= -1);
831  assert(IS_EXCITATORY_TYPE(groupConfigMap[preGrpId].type) == true);
832 
833  if (isSet) {
834  assert(type != UNKNOWN_STDP);
835  assert(tauPlus > 0.0f); assert(tauMinus > 0.0f);
836  assert(w_pka >= 0.0f); assert(w_plc >= 0.0f);
837  assert(nm_pka >= 0); assert(nm_plc >= 0);
838  }
839 
840  short int connId = getConnectId(preGrpId, postGrpId);
841  if (connId < 0) {
842  KERNEL_ERROR("No connection found from group %d(%s) to group %d(%s)", preGrpId, getGroupName(preGrpId).c_str(),
843  postGrpId, getGroupName(postGrpId).c_str());
845  }
846 
847  // set STDP for a given connection
848  // set params for STDP curve
849  connectConfigMap[connId].stdpConfig.ALPHA_PLUS_EXC = alphaPlus;
850  connectConfigMap[connId].stdpConfig.ALPHA_MINUS_EXC = alphaMinus;
851  connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_EXC = 1.0f / tauPlus;
852  connectConfigMap[connId].stdpConfig.TAU_MINUS_INV_EXC = 1.0f / tauMinus;
853  connectConfigMap[connId].stdpConfig.GAMMA = 0.0f;
854  connectConfigMap[connId].stdpConfig.KAPPA = 0.0f;
855  connectConfigMap[connId].stdpConfig.OMEGA = 0.0f;
856  connectConfigMap[connId].stdpConfig.NM_PKA = nm_pka;
857  connectConfigMap[connId].stdpConfig.NM_PLC = nm_plc;
858  connectConfigMap[connId].stdpConfig.W_PKA = w_pka;
859  connectConfigMap[connId].stdpConfig.W_PLC = w_plc;
860  // set flags for STDP function
861  connectConfigMap[connId].stdpConfig.WithESTDPtype = type;
862  connectConfigMap[connId].stdpConfig.WithESTDPcurve = curve;
863  connectConfigMap[connId].stdpConfig.WithESTDP = isSet;
864  connectConfigMap[connId].stdpConfig.WithSTDP |= connectConfigMap[connId].stdpConfig.WithESTDP;
865  sim_with_stdp |= connectConfigMap[connId].stdpConfig.WithSTDP;
866 
867  groupConfigMap[postGrpId].WithSTDP |= connectConfigMap[connId].stdpConfig.WithSTDP;
868  groupConfigMap[postGrpId].WithDA_MOD |= (type == DA_MOD);
869  groupConfigMap[postGrpId].WithPKA_PLC_MOD |= (type == PKA_PLC_MOD);
870 
871  KERNEL_INFO("PKA/PLC E-STDP %s for %s(%d) to %s(%d)", isSet ? "enabled" : "disabled", groupConfigMap[preGrpId].grpName.c_str(), preGrpId,
872  groupConfigMap[postGrpId].grpName.c_str(), postGrpId);
873 }
874 
875 
876 // set the current-calculation (IcalcType) modulation for a specific connection
877 void SNN::setConnectionModulation(int preGrpId, int postGrpId, IcalcType icalcType)
878 {
879  assert(preGrpId >= -1);
880  assert(postGrpId >= -1);
881 
882  short int connId = getConnectId(preGrpId, postGrpId);
883 
884  connectConfigMap[connId].icalcType = icalcType;
885 }
886 #endif
887 
888 
889 // set ISTDP params
890 void SNN::setISTDP(int preGrpId, int postGrpId, bool isSet, STDPType type, STDPCurve curve, float ab1, float ab2, float tau1, float tau2) {
891  assert(preGrpId >= -1);
892  assert(postGrpId >= -1);
893  assert(IS_INHIBITORY_TYPE(groupConfigMap[preGrpId].type) == true);
894 
895  if (isSet) {
896  assert(type != UNKNOWN_STDP);
897  assert(tau1 > 0); assert(tau2 > 0);
898  }
899 
900  short int connId = getConnectId(preGrpId, postGrpId);
901  if (connId < 0) {
902  KERNEL_ERROR("No connection found from group %d(%s) to group %d(%s)", preGrpId, getGroupName(preGrpId).c_str(),
903  postGrpId, getGroupName(postGrpId).c_str());
905  }
906 
907  // set STDP for a given connection
908  // set params for STDP curve
909  if (curve == EXP_CURVE) {
910  connectConfigMap[connId].stdpConfig.ALPHA_PLUS_INB = ab1;
911  connectConfigMap[connId].stdpConfig.ALPHA_MINUS_INB = ab2;
912  connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_INB = 1.0f / tau1;
913  connectConfigMap[connId].stdpConfig.TAU_MINUS_INV_INB = 1.0f / tau2;
914  connectConfigMap[connId].stdpConfig.BETA_LTP = 0.0f;
915  connectConfigMap[connId].stdpConfig.BETA_LTD = 0.0f;
916  connectConfigMap[connId].stdpConfig.LAMBDA = 1.0f;
917  connectConfigMap[connId].stdpConfig.DELTA = 1.0f;
918  } else {
919  connectConfigMap[connId].stdpConfig.ALPHA_PLUS_INB = 0.0f;
920  connectConfigMap[connId].stdpConfig.ALPHA_MINUS_INB = 0.0f;
921  connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_INB = 1.0f;
922  connectConfigMap[connId].stdpConfig.TAU_MINUS_INV_INB = 1.0f;
923  connectConfigMap[connId].stdpConfig.BETA_LTP = ab1;
924  connectConfigMap[connId].stdpConfig.BETA_LTD = ab2;
925  connectConfigMap[connId].stdpConfig.LAMBDA = tau1;
926  connectConfigMap[connId].stdpConfig.DELTA = tau2;
927  }
928  // set flags for STDP function
929  //FIXME: separate STDPType to ESTDPType and ISTDPType
930  connectConfigMap[connId].stdpConfig.WithISTDPtype = type;
931  connectConfigMap[connId].stdpConfig.WithISTDPcurve = curve;
932  connectConfigMap[connId].stdpConfig.WithISTDP = isSet;
933  connectConfigMap[connId].stdpConfig.WithSTDP |= connectConfigMap[connId].stdpConfig.WithISTDP;
934  sim_with_stdp |= connectConfigMap[connId].stdpConfig.WithSTDP;
935 
936  groupConfigMap[postGrpId].WithSTDP |= connectConfigMap[connId].stdpConfig.WithSTDP;
937  groupConfigMap[postGrpId].WithDA_MOD |= (type == DA_MOD);
938 #ifdef LN_I_CALC_TYPES
939  groupConfigMap[postGrpId].WithPKA_PLC_MOD |= (type == PKA_PLC_MOD);
940 #endif
941  KERNEL_INFO("I-STDP %s for %s(%d) to %s(%d)", isSet?"enabled":"disabled", groupConfigMap[preGrpId].grpName.c_str(), preGrpId,
942  groupConfigMap[postGrpId].grpName.c_str(), postGrpId);
943 }
944 
945 // set STP params
946 void SNN::setSTP(int gGrpId, bool isSet, float STP_U, float STP_tau_u, float STP_tau_x) {
947  assert(gGrpId >= -1);
948  if (isSet) {
949  assert(STP_U > 0 && STP_U <= 1); assert(STP_tau_u > 0); assert(STP_tau_x > 0);
950  }
951 
952  if (gGrpId == ALL) { // shortcut for all groups
953  for(int grpId = 0; grpId < numGroups; grpId++) {
954  setSTP(grpId, isSet, STP_U, STP_tau_u, STP_tau_x);
955  }
956  } else {
957  // set STP for a given group
958  sim_with_stp |= isSet;
959  groupConfigMap[gGrpId].stpConfig.WithSTP = isSet;
960  groupConfigMap[gGrpId].stpConfig.STP_A = (STP_U > 0.0f) ? 1.0 / STP_U : 1.0f; // scaling factor
961  groupConfigMap[gGrpId].stpConfig.STP_U = STP_U;
962  groupConfigMap[gGrpId].stpConfig.STP_tau_u_inv = 1.0f / STP_tau_u; // facilitatory
963  groupConfigMap[gGrpId].stpConfig.STP_tau_x_inv = 1.0f / STP_tau_x; // depressive
964 
965  KERNEL_INFO("STP %s for %d (%s):\tA: %1.4f, U: %1.4f, tau_u: %4.0f, tau_x: %4.0f", isSet?"enabled":"disabled",
966  gGrpId, groupConfigMap[gGrpId].grpName.c_str(), groupConfigMap[gGrpId].stpConfig.STP_A, STP_U, STP_tau_u, STP_tau_x);
967  }
968 }
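// --- Illustrative usage sketch (not part of the original file) ----------------------------------
// STP is configured on the presynaptic group: STP_U is the utilization increment, STP_tau_u and
// STP_tau_x are the facilitating and depressing recovery time constants in ms (stored above as
// inverses). Values below are placeholders for a facilitating synapse.
static void exampleEnableSTP(SNN& snn, int gGrpId) {
	snn.setSTP(gGrpId, true, 0.15f, 750.0f, 50.0f);  // STP_U, STP_tau_u, STP_tau_x
}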
969 
970 
971 #ifdef LN_I_CALC_TYPES
972 void SNN::setNM4STP(int gGrpId, float wSTP_U[], float wSTP_tau_u[], float wSTP_tau_x[]) {
973  assert(gGrpId >= -1);
974  if (!groupConfigMap[gGrpId].stpConfig.WithSTP) {
975  KERNEL_ERROR("setNM4STP: STP must be enabled for the group before neuromodulator weights can be set");
976  }
977 
978  auto& config = groupConfigMap[gGrpId].nm4StpConfig;
979 
980  config.WithNM4STP = true;
981 
982  for (int i = 0; i < NM_NE + 3; i++) {
983  config.w_STP_U[i] = wSTP_U[i];
984  config.w_STP_tau_u[i] = wSTP_tau_u[i];
985  config.w_STP_tau_x[i] = wSTP_tau_x[i];
986  }
987 
988 }
989 #endif
990 
991 
992 void SNN::setWeightAndWeightChangeUpdate(UpdateInterval wtANDwtChangeUpdateInterval, bool enableWtChangeDecay, float wtChangeDecay) {
993  assert(wtChangeDecay > 0.0f && wtChangeDecay < 1.0f);
994 
995  switch (wtANDwtChangeUpdateInterval) {
996  case INTERVAL_10MS:
997  wtANDwtChangeUpdateInterval_ = 10;
998  break;
999  case INTERVAL_100MS:
1000  wtANDwtChangeUpdateInterval_ = 100;
1001  break;
1002  case INTERVAL_1000MS:
1003  default:
1004  wtANDwtChangeUpdateInterval_ = 1000;
1005  break;
1006  }
1007 
1008  if (enableWtChangeDecay) {
1009  // set up stdp factor according to update interval
1010  switch (wtANDwtChangeUpdateInterval) {
1011  case INTERVAL_10MS:
1012  stdpScaleFactor_ = 0.005f;
1013  break;
1014  case INTERVAL_100MS:
1015  stdpScaleFactor_ = 0.05f;
1016  break;
1017  case INTERVAL_1000MS:
1018  default:
1019  stdpScaleFactor_ = 0.5f;
1020  break;
1021  }
1022  // set up weight decay
1023  wtChangeDecay_ = wtChangeDecay;
1024  } else {
1025  stdpScaleFactor_ = 1.0f;
1026  wtChangeDecay_ = 0.0f;
1027  }
1028 
1029  KERNEL_INFO("Update weight and weight change every %d ms", wtANDwtChangeUpdateInterval_);
1030  KERNEL_INFO("Weight Change Decay is %s", enableWtChangeDecay? "enabled" : "disabled");
1031  KERNEL_INFO("STDP scale factor = %1.3f, wtChangeDecay = %1.3f", stdpScaleFactor_, wtChangeDecay_);
1032 }
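// --- Illustrative usage sketch (not part of the original file) ----------------------------------
// The update interval also fixes the STDP scale factor when weight-change decay is enabled
// (0.005 / 0.05 / 0.5 for 10 / 100 / 1000 ms, see above). A minimal sketch with the 1 s default
// and a placeholder decay factor.
static void exampleConfigureWeightUpdates(SNN& snn) {
	snn.setWeightAndWeightChangeUpdate(INTERVAL_1000MS, true, 0.9f);  // decay wtChange by 0.9
}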
1033 
1037 
1038 
1039 
1040 // reorganize the network and do the necessary allocation
1041 // of all variables for carrying out the simulation.
1042 // this code is run only one time during network initialization
1043 void SNN::setupNetwork() {
1044  switch (snnState) {
1045  case CONFIG_SNN:
1046  compileSNN();
1047  case COMPILED_SNN:
1048  partitionSNN();
1049  case PARTITIONED_SNN:
1050  generateRuntimeSNN();
1051  break;
1052  case EXECUTABLE_SNN:
1053  break;
1054  default:
1055  KERNEL_ERROR("Unknown SNN state");
1056  break;
1057  }
1058 }
1059 
1060 #ifdef LN_SETUP_NETWORK_MT
1061 // LN Test
1062 #include <chrono>
1063 // reorganize the network and do the necessary allocation
1064 // of all variables for carrying out the simulation.
1065 // this code is run only one time during network initialization
1066 void SNN::setupNetworkMT() {
1067  auto t0 = std::chrono::steady_clock::now();
1068  auto t1 = std::chrono::steady_clock::now();
1069  std::chrono::duration<double> d = t1 - t0; // \todo double
1070  switch (snnState) {
1071  case CONFIG_SNN:
1072  t0 = std::chrono::steady_clock::now();
1073  compileSNN();
1074  t1 = std::chrono::steady_clock::now();
1075  d = t1 - t0;
1076  printf("compileSNN: %.1fs\n", d.count());
1077  case COMPILED_SNN:
1078  t0 = std::chrono::steady_clock::now();
1079  partitionSNNMT();
1080  t1 = std::chrono::steady_clock::now();
1081  d = t1 - t0;
1082  printf("partitionSNNMT: %.1fs\n", d.count());
1083  case PARTITIONED_SNN:
1084  t0 = std::chrono::steady_clock::now();
1085  generateRuntimeSNN();
1086  t1 = std::chrono::steady_clock::now();
1087  d = t1 - t0;
1088  printf("generateRuntimeSNN: %.1fs\n", d.count());
1089  break;
1090  case EXECUTABLE_SNN:
1091  break;
1092  default:
1093  KERNEL_ERROR("Unknown SNN state");
1094  break;
1095  }
1096 }
1097 #endif
1098 
1102 
1103 int SNN::runNetwork(int _nsec, int _nmsec, bool printRunSummary) {
1104  assert(_nmsec >= 0 && _nmsec < 1000);
1105  assert(_nsec >= 0);
1106  int runDurationMs = _nsec*1000 + _nmsec;
1107  KERNEL_DEBUG("runNetwork: runDur=%dms, printRunSummary=%s", runDurationMs, printRunSummary?"y":"n");
1108 
1109  // setupNetwork() must have already been called
1110  assert(snnState == EXECUTABLE_SNN);
1111 
1112  // don't bother printing if logger mode is SILENT
1113  printRunSummary = (loggerMode_==SILENT) ? false : printRunSummary;
1114 
1115  // first-time run: inform the user the simulation is running now
1116  if (simTime==0 && printRunSummary) {
1117  KERNEL_INFO("");
1118  KERNEL_INFO("******************** Running the simulation on %d GPU(s) and %d CPU(s) ***************************", numGPUs, numCores);
1119  KERNEL_INFO("");
1120  }
1121 
1122  // reset all spike counters
1123  resetSpikeCnt(ALL);
1124 
1125  // store current start time for future reference
1126  simTimeRunStart = simTime;
1127  simTimeRunStop = simTime + runDurationMs;
1128  assert(simTimeRunStop >= simTimeRunStart); // check for arithmetic underflow
1129 
1130  // ConnectionMonitor is a special case: we might want the first snapshot at t=0 in the binary
1131  // but updateTime() is false for simTime==0.
1132  // And we cannot put this code in ConnectionMonitorCore::init, because then the user would have no
1133  // way to call ConnectionMonitor::setUpdateTimeIntervalSec before...
1134  if (simTime == 0 && numConnectionMonitor) {
1136  }
1137 
1138  // set the Poisson generation time slice to be at the run duration up to MAX_TIME_SLICE
1139 #if defined(WIN32) && defined(__NO_CUDA__) // LN2021 fix gcc
1140  setGrpTimeSlice(ALL, std::max<int>(1, std::min<int>(runDurationMs, MAX_TIME_SLICE))); // LN2021 Fix Issue illegal token, unknown-type c++17
1141 #else
1142  setGrpTimeSlice(ALL, std::max(1, std::min(runDurationMs, MAX_TIME_SLICE))); // LN2021 Fix Issue illegal token, unknown-type c++17 // gcc
1143 #endif
1144 #ifndef __NO_CUDA__
1145  CUDA_RESET_TIMER(timer);
1146  CUDA_START_TIMER(timer);
1147 #endif
1148 
1149  //KERNEL_INFO("Reached the advSimStep loop!");
1150 
1151  // if nsec=0 and nmsec=10, we need to run the simulator for 10 time steps (ms);
1152  // if nsec=1 and nmsec=10, we need to run the simulator for 1*1000+10 time steps (ms)
1153  for(int i = 0; i < runDurationMs; i++) {
1154  advSimStep();
1155  //KERNEL_INFO("Executed an advSimStep!");
1156 
1157  // update weight every updateInterval ms if plastic synapses present
1158  if (!sim_with_fixedwts && wtANDwtChangeUpdateInterval_ == ++wtANDwtChangeUpdateIntervalCnt_) {
1159  wtANDwtChangeUpdateIntervalCnt_ = 0; // reset counter
1160  if (!sim_in_testing) {
1161  // keep this if statement separate from the above, so that the counter is updated correctly
1162  updateWeights();
1163  }
1164  }
1165 
1166  // Note: updateTime() advances simTime, simTimeMs, and simTimeSec accordingly
1167  if (updateTime()) {
1168  // finished one sec of simulation...
1169  if (numSpikeMonitor) {
1171  }
1172  if (numGroupMonitor) {
1174  }
1175  if (numConnectionMonitor) {
1177  }
1178  if (numNeuronMonitor) {
1180  }
1181 
1182  shiftSpikeTables();
1183  }
1184 
1185  fetchNeuronSpikeCount(ALL);
1186  }
1187 
1188  //KERNEL_INFO("Updated monitors!");
1189 
1190  // user can opt to display some runNetwork summary
1191  if (printRunSummary) {
1192 
1193  // if there are Monitors available and it's time to show the log, print status for each group
1194  if (numSpikeMonitor) {
1195  printStatusSpikeMonitor(ALL);
1196  }
1197  if (numConnectionMonitor) {
1198  printStatusConnectionMonitor(ALL);
1199  }
1200  if (numGroupMonitor) {
1201  printStatusGroupMonitor(ALL);
1202  }
1203 
1204  // record time of run summary print
1205  simTimeLastRunSummary = simTime;
1206  }
1207 
1208  // call updateSpike(Group)Monitor again to fetch all the left-over spikes and group status (neuromodulator)
1211 
1212  // keep track of simulation time...
1213 #ifndef __NO_CUDA__
1214  CUDA_STOP_TIMER(timer);
1215  lastExecutionTime = CUDA_GET_TIMER_VALUE(timer);
1216  cumExecutionTime += lastExecutionTime;
1217 #endif
1218  return 0;
1219 }
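// --- Illustrative usage sketch (not part of the original file) ----------------------------------
// runNetwork() advances the simulation in 1 ms steps for _nsec*1000 + _nmsec milliseconds and
// requires that setupNetwork() has already been called (snnState == EXECUTABLE_SNN). A minimal
// sketch running ten 1-second chunks with a run summary after each:
static void exampleRunTenSeconds(SNN& snn) {
	for (int s = 0; s < 10; s++) {
		snn.runNetwork(1, 0, true);  // 1 s + 0 ms, print run summary
	}
}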
1220 
1221 
1222 
1226 
1227 // adds a bias to every weight in the connection
1228 void SNN::biasWeights(short int connId, float bias, bool updateWeightRange) {
1229  assert(connId>=0 && connId<numConnections);
1230 
1231  int netId = groupConfigMDMap[connectConfigMap[connId].grpDest].netId;
1232  int lGrpId = groupConfigMDMap[connectConfigMap[connId].grpDest].lGrpId;
1233 
1234  fetchPreConnectionInfo(netId);
1235  fetchConnIdsLookupArray(netId);
1236  fetchSynapseState(netId);
1237  // iterate over all postsynaptic neurons
1238  for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++) {
1239  unsigned int cumIdx = managerRuntimeData.cumulativePre[lNId];
1240 
1241  // iterate over all presynaptic neurons
1242  unsigned int pos_ij = cumIdx;
1243  for (int j = 0; j < managerRuntimeData.Npre[lNId]; pos_ij++, j++) {
1244  if (managerRuntimeData.connIdsPreIdx[pos_ij] == connId) {
1245  // apply bias to weight
1246  float weight = managerRuntimeData.wt[pos_ij] + bias;
1247 
1248  // inform user of action taken if weight is out of bounds
1249 // bool needToPrintDebug = (weight+bias>connInfo->maxWt || weight+bias<connInfo->minWt);
1250  bool needToPrintDebug = (weight > connectConfigMap[connId].maxWt || weight < 0.0f);
1251 
1252  if (updateWeightRange) {
1253  // if this flag is set, we need to update minWt,maxWt accordingly
1254  // will be saving new maxSynWt and copying to GPU below
1255 // connInfo->minWt = fmin(connInfo->minWt, weight);
1256 #if defined(WIN32) && defined(__NO_CUDA__) // LN2021 fix gcc
1257  connectConfigMap[connId].maxWt = std::max<float>(connectConfigMap[connId].maxWt, weight);
1258 #else
1259  connectConfigMap[connId].maxWt = std::max(connectConfigMap[connId].maxWt, weight);
1260 #endif
1261  if (needToPrintDebug) {
1262  KERNEL_DEBUG("biasWeights(%d,%f,%s): updated weight ranges to [%f,%f]", connId, bias,
1263  (updateWeightRange?"true":"false"), 0.0f, connectConfigMap[connId].maxWt);
1264  }
1265  } else {
1266  // constrain weight to boundary values
1267  // compared to above, we swap minWt/maxWt logic
1268 #if defined(WIN32) && defined(__NO_CUDA__) // LN2021 fix gcc
1269  weight = std::min<float>(weight, connectConfigMap[connId].maxWt);
1270 #else
1271  weight = std::min(weight, connectConfigMap[connId].maxWt); // LN2021 gcc, Workaround for VC no longer neccessary
1272 #endif
1273  // weight = fmax(weight, connInfo->minWt);
1274 #if defined(WIN32) && defined(__NO_CUDA__) // LN2021 fix gcc
1275  weight = std::max<float>(weight, 0.0f);
1276 #else
1277  weight = std::max(weight, 0.0f);
1278 #endif
1279  if (needToPrintDebug) {
1280  KERNEL_DEBUG("biasWeights(%d,%f,%s): constrained weight %f to [%f,%f]", connId, bias,
1281  (updateWeightRange?"true":"false"), weight, 0.0f, connectConfigMap[connId].maxWt);
1282  }
1283  }
1284 
1285  // update datastructures
1286  managerRuntimeData.wt[pos_ij] = weight;
1287  managerRuntimeData.maxSynWt[pos_ij] = connectConfigMap[connId].maxWt; // it's easier to just update, even if it hasn't changed
1288  }
1289  }
1290 
1291  // update runtime (GPU or CPU) data structures in batches, grouped by post-neuron
1292  if (netId < CPU_RUNTIME_BASE) {
1293 #ifndef __NO_CUDA__
1294  CUDA_CHECK_ERRORS( cudaMemcpy(&(runtimeData[netId].wt[cumIdx]), &(managerRuntimeData.wt[cumIdx]), sizeof(float)*managerRuntimeData.Npre[lNId],
1295  cudaMemcpyHostToDevice) );
1296 
1297  if (runtimeData[netId].maxSynWt != NULL) {
1298  // only copy maxSynWt if datastructure actually exists on the GPU runtime
1299  // (that logic should be done elsewhere though)
1300  CUDA_CHECK_ERRORS( cudaMemcpy(&(runtimeData[netId].maxSynWt[cumIdx]), &(managerRuntimeData.maxSynWt[cumIdx]),
1301  sizeof(float) * managerRuntimeData.Npre[lNId], cudaMemcpyHostToDevice) );
1302  }
1303 #else
1304  assert(false);
1305 #endif
1306  } else {
1307  memcpy(&runtimeData[netId].wt[cumIdx], &managerRuntimeData.wt[cumIdx], sizeof(float) * managerRuntimeData.Npre[lNId]);
1308 
1309  if (runtimeData[netId].maxSynWt != NULL) {
1310  // only copy maxSynWt if datastructure actually exists on the CPU runtime
1311  // (that logic should be done elsewhere though)
1312  memcpy(&runtimeData[netId].maxSynWt[cumIdx], &managerRuntimeData.maxSynWt[cumIdx], sizeof(float) * managerRuntimeData.Npre[lNId]);
1313  }
1314  }
1315  }
1316 }
1317 
1318 // deallocates dynamical structures and exits
1319 void SNN::exitSimulation(int val) {
1320  deleteObjects();
1321  exit(val);
1322 }
1323 
1324 // remembers the file pointer; the network state is read from loadSimFID when the network is set up
1325 void SNN::loadSimulation(FILE* fid) {
1326  loadSimFID = fid;
1327 }
1328 
1329 // multiplies every weight with a scaling factor
1330 void SNN::scaleWeights(short int connId, float scale, bool updateWeightRange) {
1331  assert(connId>=0 && connId<numConnections);
1332  assert(scale>=0.0f);
1333 
1334  int netId = groupConfigMDMap[connectConfigMap[connId].grpDest].netId;
1335  int lGrpId = groupConfigMDMap[connectConfigMap[connId].grpDest].lGrpId;
1336 
1337  fetchPreConnectionInfo(netId);
1338  fetchConnIdsLookupArray(netId);
1339  fetchSynapseState(netId);
1340 
1341  // iterate over all postsynaptic neurons
1342  for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++) {
1343  unsigned int cumIdx = managerRuntimeData.cumulativePre[lNId];
1344 
1345  // iterate over all presynaptic neurons
1346  unsigned int pos_ij = cumIdx;
1347  for (int j = 0; j < managerRuntimeData.Npre[lNId]; pos_ij++, j++) {
1348  if (managerRuntimeData.connIdsPreIdx[pos_ij]==connId) {
1349  // apply scaling factor to weight
1350  float weight = managerRuntimeData.wt[pos_ij] * scale;
1351 
1352  // inform user of action taken if weight is out of bounds
1353 // bool needToPrintDebug = (weight>connInfo->maxWt || weight<connInfo->minWt);
1354  bool needToPrintDebug = (weight > connectConfigMap[connId].maxWt || weight < 0.0f);
1355 
1356  if (updateWeightRange) {
1357  // if this flag is set, we need to update minWt,maxWt accordingly
1358  // will be saving new maxSynWt and copying to GPU below
1359 // connInfo->minWt = fmin(connInfo->minWt, weight);
1360 #if defined(WIN32) && defined(__NO_CUDA__) // LN2021 fix gcc
1361  connectConfigMap[connId].maxWt = std::max<float>(connectConfigMap[connId].maxWt, weight);
1362 #else
1363  connectConfigMap[connId].maxWt = std::max(connectConfigMap[connId].maxWt, weight);
1364 #endif
1365  if (needToPrintDebug) {
1366  KERNEL_DEBUG("scaleWeights(%d,%f,%s): updated weight ranges to [%f,%f]", connId, scale,
1367  (updateWeightRange?"true":"false"), 0.0f, connectConfigMap[connId].maxWt);
1368  }
1369  } else {
1370  // constrain weight to boundary values
1371  // compared to above, we swap minWt/maxWt logic
1372 #if defined(WIN32) && defined(__NO_CUDA__) // LN2021 fix gcc
1373  weight = std::min<float>(weight, connectConfigMap[connId].maxWt);
1374 #else
1375  weight = std::min(weight, connectConfigMap[connId].maxWt); // LN2021 gcc
1376 #endif
1377 // weight = fmax(weight, connInfo->minWt);
1378 #if defined(WIN32) && defined(__NO_CUDA__) // LN2021 fix gcc
1379  weight = std::max<float>(weight, 0.0f); // \todo Issue
1380 #else
1381  weight = std::max(weight, 0.0f); // \todo Issue lower bound
1382 #endif
1383  if (needToPrintDebug) {
1384  KERNEL_DEBUG("scaleWeights(%d,%f,%s): constrained weight %f to [%f,%f]", connId, scale,
1385  (updateWeightRange?"true":"false"), weight, 0.0f, connectConfigMap[connId].maxWt);
1386  }
1387  }
1388 
1389  // update datastructures
1390  managerRuntimeData.wt[pos_ij] = weight;
1391  managerRuntimeData.maxSynWt[pos_ij] = connectConfigMap[connId].maxWt; // it's easier to just update, even if it hasn't changed
1392  }
1393  }
1394 
1395  // update GPU datastructures in batches, grouped by post-neuron
1396  if (netId < CPU_RUNTIME_BASE) {
1397 #ifndef __NO_CUDA__
1398  CUDA_CHECK_ERRORS(cudaMemcpy(&runtimeData[netId].wt[cumIdx], &managerRuntimeData.wt[cumIdx], sizeof(float)*managerRuntimeData.Npre[lNId],
1399  cudaMemcpyHostToDevice));
1400 
1401  if (runtimeData[netId].maxSynWt != NULL) {
1402  // only copy maxSynWt if datastructure actually exists on the GPU runtime
1403  // (that logic should be done elsewhere though)
1404  CUDA_CHECK_ERRORS(cudaMemcpy(&runtimeData[netId].maxSynWt[cumIdx], &managerRuntimeData.maxSynWt[cumIdx],
1405  sizeof(float) * managerRuntimeData.Npre[lNId], cudaMemcpyHostToDevice));
1406  }
1407 #else
1408  assert(false);
1409 #endif
1410  } else {
1411  memcpy(&runtimeData[netId].wt[cumIdx], &managerRuntimeData.wt[cumIdx], sizeof(float) * managerRuntimeData.Npre[lNId]);
1412 
1413  if (runtimeData[netId].maxSynWt != NULL) {
1414  // only copy maxSynWt if datastructure actually exists on the CPU runtime
1415  // (that logic should be done elsewhere though)
1416  memcpy(&runtimeData[netId].maxSynWt[cumIdx], &managerRuntimeData.maxSynWt[cumIdx], sizeof(float) * managerRuntimeData.Npre[lNId]);
1417  }
1418  }
1419  }
1420 }
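// --- Editor's usage sketch (not part of the original source) ----------------
// Shows the two scaleWeights() modes on a hypothetical connection ID `connId`
// of an already-configured SNN instance.
static void exampleScaleWeights(SNN& snn, short int connId) {
	// scale all weights of connId by 20%, clamping results to [0, maxWt]
	snn.scaleWeights(connId, 1.2f, false);
	// scale again, this time letting maxWt grow to accommodate larger weights
	snn.scaleWeights(connId, 1.2f, true);
}
// -----------------------------------------------------------------------------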
1421 
1422 // FIXME: distinguish the function call at CONFIG_STATE and SETUP_STATE, where groupConfigs[0][] might not be available
1423 // or groupConfigMap is not in sync with groupConfigs[0][]
1424 GroupMonitor* SNN::setGroupMonitor(int gGrpId, FILE* fid, int mode) {
1425  int netId = groupConfigMDMap[gGrpId].netId;
1426  int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
1427 
1428  // check whether group already has a GroupMonitor
1429  if (groupConfigMDMap[gGrpId].groupMonitorId >= 0) {
1430 		KERNEL_ERROR("setGroupMonitor has already been called on Group %d (%s).", gGrpId, groupConfigMap[gGrpId].grpName.c_str());
1431 		exitSimulation(1);
1432 	}
1433 
1434  // create new GroupMonitorCore object in any case and initialize analysis components
1435  // grpMonObj destructor (see below) will deallocate it
1436  GroupMonitorCore* grpMonCoreObj = new GroupMonitorCore(this, numGroupMonitor, gGrpId, mode);
1437  groupMonCoreList[numGroupMonitor] = grpMonCoreObj;
1438 
1439  // assign group status file ID if we selected to write to a file, else it's NULL
1440  // if file pointer exists, it has already been fopened
1441  // this will also write the header section of the group status file
1442  // grpMonCoreObj destructor will fclose it
1443  grpMonCoreObj->setGroupFileId(fid);
1444 
1445  // create a new GroupMonitor object for the user-interface
1446  // SNN::deleteObjects will deallocate it
1447  GroupMonitor* grpMonObj = new GroupMonitor(grpMonCoreObj);
1448  groupMonList[numGroupMonitor] = grpMonObj;
1449 
1450  // also inform the group that it is being monitored...
1451  groupConfigMDMap[gGrpId].groupMonitorId = numGroupMonitor;
1452 
1453  numGroupMonitor++;
1454  KERNEL_INFO("GroupMonitor set for group %d (%s)", gGrpId, groupConfigMap[gGrpId].grpName.c_str());
1455 
1456  return grpMonObj;
1457 }
1458 
1459 // FIXME: distinguish the function call at CONFIG_STATE and SETUP_STATE, where group(connect)Config[] might not be available
1460 // or group(connect)ConfigMap is not in sync with group(connect)Config[]
1461 ConnectionMonitor* SNN::setConnectionMonitor(int grpIdPre, int grpIdPost, FILE* fid) {
1462  // find connection based on pre-post pair
1463  short int connId = getConnectId(grpIdPre, grpIdPost);
1464  if (connId<0) {
1465  KERNEL_ERROR("No connection found from group %d(%s) to group %d(%s)", grpIdPre, getGroupName(grpIdPre).c_str(),
1466 			grpIdPost, getGroupName(grpIdPost).c_str());
1467 		exitSimulation(1);
1468 	}
1469 
1470  // check whether connection already has a connection monitor
1471  if (connectConfigMap[connId].connectionMonitorId >= 0) {
1472 		KERNEL_ERROR("setConnectionMonitor has already been called on Connection %d (MonitorId=%d)", connId, connectConfigMap[connId].connectionMonitorId);
1473 		exitSimulation(1);
1474 	}
1475 
1476  // inform the connection that it is being monitored...
1477  // this needs to be called before new ConnectionMonitorCore
1478  connectConfigMap[connId].connectionMonitorId = numConnectionMonitor;
1479 
1480  // create new ConnectionMonitorCore object in any case and initialize
1481  // connMonObj destructor (see below) will deallocate it
1482  ConnectionMonitorCore* connMonCoreObj = new ConnectionMonitorCore(this, numConnectionMonitor, connId,
1483  grpIdPre, grpIdPost);
1484  connMonCoreList[numConnectionMonitor] = connMonCoreObj;
1485 
1486  // assign conn file ID if we selected to write to a file, else it's NULL
1487  // if file pointer exists, it has already been fopened
1488  // this will also write the header section of the conn file
1489  // connMonCoreObj destructor will fclose it
1490  connMonCoreObj->setConnectFileId(fid);
1491 
1492  // create a new ConnectionMonitor object for the user-interface
1493  // SNN::deleteObjects will deallocate it
1494  ConnectionMonitor* connMonObj = new ConnectionMonitor(connMonCoreObj);
1495  connMonList[numConnectionMonitor] = connMonObj;
1496 
1497  // now init core object (depends on several datastructures allocated above)
1498  connMonCoreObj->init();
1499 
1500  numConnectionMonitor++;
1501  KERNEL_INFO("ConnectionMonitor %d set for Connection %d: %d(%s) => %d(%s)", connectConfigMap[connId].connectionMonitorId, connId, grpIdPre, getGroupName(grpIdPre).c_str(),
1502  grpIdPost, getGroupName(grpIdPost).c_str());
1503 
1504  return connMonObj;
1505 }
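// --- Editor's usage sketch (not part of the original source) ----------------
// Attaches a ConnectionMonitor to the connection between two hypothetical
// group IDs. Passing NULL as the file ID keeps weight snapshots in memory
// only; passing an fopen()'ed FILE* would additionally write them to file.
static void exampleConnectionMonitor(SNN& snn, int gGrpIdPre, int gGrpIdPost) {
	ConnectionMonitor* connMon = snn.setConnectionMonitor(gGrpIdPre, gGrpIdPost, NULL);
	(void)connMon; // the returned object is owned by SNN and freed in deleteObjects()
}
// -----------------------------------------------------------------------------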
1506 
1507 // FIXME: distinguish the function call at CONFIG_STATE and SETUP_STATE, where groupConfigs[0][] might not be available
1508 // or groupConfigMap is not in sync with groupConfigs[0][]
1509 // sets up a spike generator
1510 void SNN::setSpikeGenerator(int gGrpId, SpikeGeneratorCore* spikeGenFunc) {
1511  assert(snnState == CONFIG_SNN); // must be called before setupNetwork() to work on GPU
1512  assert(spikeGenFunc);
1513  assert(groupConfigMap[gGrpId].isSpikeGenerator);
1514  groupConfigMap[gGrpId].spikeGenFunc = spikeGenFunc;
1515 }
1516 
1517 // record spike information, return a SpikeMonitor object
1518 SpikeMonitor* SNN::setSpikeMonitor(int gGrpId, FILE* fid) {
1519  // check whether group already has a SpikeMonitor
1520  if (groupConfigMDMap[gGrpId].spikeMonitorId >= 0) {
1521  // in this case, return the current object and update fid
1522  SpikeMonitor* spkMonObj = getSpikeMonitor(gGrpId);
1523 
1524  // update spike file ID
1525  SpikeMonitorCore* spkMonCoreObj = getSpikeMonitorCore(gGrpId);
1526  spkMonCoreObj->setSpikeFileId(fid);
1527 
1528  KERNEL_INFO("SpikeMonitor updated for group %d (%s)", gGrpId, groupConfigMap[gGrpId].grpName.c_str());
1529  return spkMonObj;
1530  } else {
1531  // create new SpikeMonitorCore object in any case and initialize analysis components
1532  // spkMonObj destructor (see below) will deallocate it
1533  SpikeMonitorCore* spkMonCoreObj = new SpikeMonitorCore(this, numSpikeMonitor, gGrpId);
1534  spikeMonCoreList[numSpikeMonitor] = spkMonCoreObj;
1535 
1536  // assign spike file ID if we selected to write to a file, else it's NULL
1537  // if file pointer exists, it has already been fopened
1538  // this will also write the header section of the spike file
1539  // spkMonCoreObj destructor will fclose it
1540  spkMonCoreObj->setSpikeFileId(fid);
1541 
1542  // create a new SpikeMonitor object for the user-interface
1543  // SNN::deleteObjects will deallocate it
1544  SpikeMonitor* spkMonObj = new SpikeMonitor(spkMonCoreObj);
1545  spikeMonList[numSpikeMonitor] = spkMonObj;
1546 
1547  // also inform the grp that it is being monitored...
1548  groupConfigMDMap[gGrpId].spikeMonitorId = numSpikeMonitor;
1549 
1550  numSpikeMonitor++;
1551  KERNEL_INFO("SpikeMonitor set for group %d (%s)", gGrpId, groupConfigMap[gGrpId].grpName.c_str());
1552 
1553  return spkMonObj;
1554  }
1555 }
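// --- Editor's usage sketch (not part of the original source) ----------------
// Attaches a SpikeMonitor to a hypothetical group. As above, a NULL file ID
// keeps spike data in memory only. Recording is then controlled through the
// returned SpikeMonitor object; startRecording()/stopRecording() are assumed
// here to be part of its public interface, as in the CARLsim user API.
static void exampleSpikeMonitor(SNN& snn, int gGrpId) {
	SpikeMonitor* spkMon = snn.setSpikeMonitor(gGrpId, NULL);
	spkMon->startRecording();
	// ... run the simulation for some time ...
	spkMon->stopRecording();
}
// -----------------------------------------------------------------------------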
1556 
1557 // record neuron state information, return a NeuronMonitor object
1558 NeuronMonitor* SNN::setNeuronMonitor(int gGrpId, FILE* fid) {
1559  int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
1560  int netId = groupConfigMDMap[gGrpId].netId;
1561 
1562  if (getGroupNumNeurons(gGrpId) > 128) {
1563  KERNEL_WARN("Due to limited memory space, only the first 128 neurons can be monitored by NeuronMonitor");
1564  }
1565 
1566  // check whether group already has a NeuronMonitor
1567  if (groupConfigMDMap[gGrpId].neuronMonitorId >= 0) {
1568  // in this case, return the current object and update fid
1569  NeuronMonitor* nrnMonObj = getNeuronMonitor(gGrpId);
1570 
1571  // update neuron file ID
1572  NeuronMonitorCore* nrnMonCoreObj = getNeuronMonitorCore(gGrpId);
1573  nrnMonCoreObj->setNeuronFileId(fid);
1574 
1575  KERNEL_INFO("NeuronMonitor updated for group %d (%s)", gGrpId, groupConfigMap[gGrpId].grpName.c_str());
1576  return nrnMonObj;
1577  } else {
1578  // create new NeuronMonitorCore object in any case and initialize analysis components
1579  // nrnMonObj destructor (see below) will deallocate it
1580  NeuronMonitorCore* nrnMonCoreObj = new NeuronMonitorCore(this, numNeuronMonitor, gGrpId);
1581  neuronMonCoreList[numNeuronMonitor] = nrnMonCoreObj;
1582 
1583  // assign neuron state file ID if we selected to write to a file, else it's NULL
1584  // if file pointer exists, it has already been fopened
1585 		// this will also write the header section of the neuron state file
1586 		// nrnMonCoreObj destructor will fclose it
1587  nrnMonCoreObj->setNeuronFileId(fid);
1588 
1589  // create a new NeuronMonitor object for the user-interface
1590  // SNN::deleteObjects will deallocate it
1591  NeuronMonitor* nrnMonObj = new NeuronMonitor(nrnMonCoreObj);
1592  neuronMonList[numNeuronMonitor] = nrnMonObj;
1593 
1594  // also inform the grp that it is being monitored...
1595  groupConfigMDMap[gGrpId].neuronMonitorId = numNeuronMonitor;
1596 
1597  numNeuronMonitor++;
1598  KERNEL_INFO("NeuronMonitor set for group %d (%s)", gGrpId, groupConfigMap[gGrpId].grpName.c_str());
1599 
1600  return nrnMonObj;
1601  }
1602 }
1603 
1604 // FIXME: distinguish the function call at CONFIG_STATE and RUN_STATE, where groupConfigs[0][] might not be available
1605 // or groupConfigMap is not in sync with groupConfigs[0][]
1606 // assigns spike rate to group
1607 void SNN::setSpikeRate(int gGrpId, PoissonRate* ratePtr, int refPeriod) {
1608  int netId = groupConfigMDMap[gGrpId].netId;
1609  int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
1610 
1611  assert(gGrpId >= 0 && lGrpId < networkConfigs[netId].numGroups);
1612  assert(ratePtr);
1613  assert(groupConfigMap[gGrpId].isSpikeGenerator);
1614  assert(ratePtr->getNumNeurons() == groupConfigMap[gGrpId].numN);
1615  assert(refPeriod >= 1);
1616 
1617  groupConfigMDMap[gGrpId].ratePtr = ratePtr;
1618  groupConfigMDMap[gGrpId].refractPeriod = refPeriod;
1619  spikeRateUpdated = true;
1620 }
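// --- Editor's usage sketch (not part of the original source) ----------------
// Drives a hypothetical Poisson spike-generator group at a constant mean rate.
// Assumes PoissonRate offers a constructor taking the neuron count and a
// setRates() helper, as in the public CARLsim API; the rate object must
// outlive the simulation run (deliberately not deleted in this sketch).
static void exampleSetSpikeRate(SNN& snn, int gGrpId) {
	PoissonRate* rate = new PoissonRate(snn.getGroupNumNeurons(gGrpId));
	rate->setRates(10.0f);             // 10 Hz mean rate for every neuron
	snn.setSpikeRate(gGrpId, rate, 1); // refractory period of 1 ms
}
// -----------------------------------------------------------------------------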
1621 
1622 // sets the weight value of a specific synapse
1623 void SNN::setWeight(short int connId, int neurIdPre, int neurIdPost, float weight, bool updateWeightRange) {
1624  assert(connId>=0 && connId<getNumConnections());
1625  assert(weight>=0.0f);
1626 
1627  assert(neurIdPre >= 0 && neurIdPre < getGroupNumNeurons(connectConfigMap[connId].grpSrc));
1628  assert(neurIdPost >= 0 && neurIdPost < getGroupNumNeurons(connectConfigMap[connId].grpDest));
1629 
1630  float maxWt = fabs(connectConfigMap[connId].maxWt);
1631  float minWt = 0.0f;
1632 
1633 	// inform user of action taken if weight is out of bounds
1634  bool needToPrintDebug = (weight>maxWt || weight<minWt);
1635 
1636  int netId = groupConfigMDMap[connectConfigMap[connId].grpDest].netId;
1637  int postlGrpId = groupConfigMDMap[connectConfigMap[connId].grpDest].lGrpId;
1638  int prelGrpId = groupConfigMDMap[connectConfigMap[connId].grpSrc].lGrpId;
1639 
1640  fetchPreConnectionInfo(netId);
1641  fetchConnIdsLookupArray(netId);
1642  fetchSynapseState(netId);
1643 
1644  if (updateWeightRange) {
1645  // if this flag is set, we need to update minWt,maxWt accordingly
1646  // will be saving new maxSynWt and copying to GPU below
1647 // connInfo->minWt = fmin(connInfo->minWt, weight);
1648  maxWt = fmax(maxWt, weight);
1649  if (needToPrintDebug) {
1650  KERNEL_DEBUG("setWeight(%d,%d,%d,%f,%s): updated weight ranges to [%f,%f]", connId, neurIdPre, neurIdPost,
1651  weight, (updateWeightRange?"true":"false"), minWt, maxWt);
1652  }
1653  } else {
1654  // constrain weight to boundary values
1655  // compared to above, we swap minWt/maxWt logic
1656  weight = fmin(weight, maxWt);
1657  weight = fmax(weight, minWt);
1658  if (needToPrintDebug) {
1659  KERNEL_DEBUG("setWeight(%d,%d,%d,%f,%s): constrained weight %f to [%f,%f]", connId, neurIdPre, neurIdPost,
1660  weight, (updateWeightRange?"true":"false"), weight, minWt, maxWt);
1661  }
1662  }
1663 
1664  // find real ID of pre- and post-neuron
1665  int neurIdPreReal = groupConfigs[netId][prelGrpId].lStartN + neurIdPre;
1666  int neurIdPostReal = groupConfigs[netId][postlGrpId].lStartN + neurIdPost;
1667 
1668  // iterate over all presynaptic synapses until right one is found
1669  bool synapseFound = false;
1670  int pos_ij = managerRuntimeData.cumulativePre[neurIdPostReal];
1671  for (int j = 0; j < managerRuntimeData.Npre[neurIdPostReal]; pos_ij++, j++) {
1672  SynInfo* preId = &(managerRuntimeData.preSynapticIds[pos_ij]);
1673  int pre_nid = GET_CONN_NEURON_ID((*preId));
1674  if (GET_CONN_NEURON_ID((*preId)) == neurIdPreReal) {
1675  assert(managerRuntimeData.connIdsPreIdx[pos_ij] == connId); // make sure we've got the right connection ID
1676 
1677  managerRuntimeData.wt[pos_ij] = isExcitatoryGroup(connectConfigMap[connId].grpSrc) ? weight : -1.0 * weight;
1678  managerRuntimeData.maxSynWt[pos_ij] = isExcitatoryGroup(connectConfigMap[connId].grpSrc) ? maxWt : -1.0 * maxWt;
1679 
1680  if (netId < CPU_RUNTIME_BASE) {
1681 #ifndef __NO_CUDA__
1682  // need to update datastructures on GPU runtime
1683  CUDA_CHECK_ERRORS(cudaMemcpy(&runtimeData[netId].wt[pos_ij], &managerRuntimeData.wt[pos_ij], sizeof(float), cudaMemcpyHostToDevice));
1684  if (runtimeData[netId].maxSynWt != NULL) {
1685  // only copy maxSynWt if datastructure actually exists on the GPU runtime
1686  // (that logic should be done elsewhere though)
1687  CUDA_CHECK_ERRORS(cudaMemcpy(&runtimeData[netId].maxSynWt[pos_ij], &managerRuntimeData.maxSynWt[pos_ij], sizeof(float), cudaMemcpyHostToDevice));
1688  }
1689 #else
1690  assert(false);
1691 #endif
1692  } else {
1693  // need to update datastructures on CPU runtime
1694  memcpy(&runtimeData[netId].wt[pos_ij], &managerRuntimeData.wt[pos_ij], sizeof(float));
1695  if (runtimeData[netId].maxSynWt != NULL) {
1696  // only copy maxSynWt if datastructure actually exists on the CPU runtime
1697  // (that logic should be done elsewhere though)
1698  memcpy(&runtimeData[netId].maxSynWt[pos_ij], &managerRuntimeData.maxSynWt[pos_ij], sizeof(float));
1699  }
1700  }
1701 
1702  // synapse found and updated: we're done!
1703  synapseFound = true;
1704  break;
1705  }
1706  }
1707 
1708  if (!synapseFound) {
1709  KERNEL_WARN("setWeight(%d,%d,%d,%f,%s): Synapse does not exist, not updated.", connId, neurIdPre, neurIdPost,
1710  weight, (updateWeightRange?"true":"false"));
1711  }
1712 }
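// --- Editor's usage sketch (not part of the original source) ----------------
// Sets a single synapse of a hypothetical connection to 0.25f. Neuron IDs are
// zero-based and relative to their respective groups; with
// updateWeightRange=false the value is clamped to the current [0, maxWt] range.
static void exampleSetWeight(SNN& snn, short int connId) {
	snn.setWeight(connId, 0 /*pre*/, 0 /*post*/, 0.25f, false);
}
// -----------------------------------------------------------------------------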
1713 
1714 void SNN::setExternalCurrent(int grpId, const std::vector<float>& current) {
1715  assert(grpId >= 0); assert(grpId < numGroups);
1716  assert(!isPoissonGroup(grpId));
1717  assert(current.size() == getGroupNumNeurons(grpId));
1718 
1719  int netId = groupConfigMDMap[grpId].netId;
1720  int lGrpId = groupConfigMDMap[grpId].lGrpId;
1721 
1722  // // update flag for faster handling at run-time
1723  // if (count_if(current.begin(), current.end(), isGreaterThanZero)) {
1724  // groupConfigs[0][grpId].WithCurrentInjection = true;
1725  // } else {
1726  // groupConfigs[0][grpId].WithCurrentInjection = false;
1727  // }
1728 
1729  // store external current in array
1730  for (int lNId = groupConfigs[netId][lGrpId].lStartN, j = 0; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++, j++) {
1731  managerRuntimeData.extCurrent[lNId] = current[j];
1732  }
1733 
1734  // copy to GPU if necessary
1735  // don't allocate; allocation done in generateRuntimeData
1736  if (netId < CPU_RUNTIME_BASE) {
1737  copyExternalCurrent(netId, lGrpId, &runtimeData[netId], cudaMemcpyHostToDevice, false);
1738  }
1739  else {
1740  copyExternalCurrent(netId, lGrpId, &runtimeData[netId], false);
1741  }
1742 }
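// --- Editor's usage sketch (not part of the original source) ----------------
// Injects the same constant external current (in model units) into every
// neuron of a hypothetical non-Poisson group; the vector must have exactly one
// entry per neuron. Passing a vector of zeros switches the injection off again.
static void exampleExternalCurrent(SNN& snn, int grpId) {
	std::vector<float> current(snn.getGroupNumNeurons(grpId), 5.0f);
	snn.setExternalCurrent(grpId, current);
}
// -----------------------------------------------------------------------------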
1743 
1744 // writes network state to file
1745 // the file pointer is managed externally; as far as this function is concerned, it simply
1746 // writes to the file
1747 void SNN::saveSimulation(FILE* fid, bool saveSynapseInfo) {
1748  int tmpInt;
1749  float tmpFloat;
1750 
1752 
1754  tmpInt = 294338571; // some int used to identify saveSimulation files
1755  if (!fwrite(&tmpInt,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1756 
1758  tmpFloat = 0.3f;
1759 	if (!fwrite(&tmpFloat,sizeof(float),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1760 
1762  tmpFloat = ((float)simTimeSec) + ((float)simTimeMs)/1000.0f;
1763  if (!fwrite(&tmpFloat,sizeof(float),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1764 
1766  stopTiming();
1767  tmpFloat = executionTime/1000.0f;
1768  if (!fwrite(&tmpFloat,sizeof(float),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1769 
1771 
1773  if (!fwrite(&glbNetworkConfig.numN,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1774  int dummyInt = 0;
1775  //if (!fwrite(&numPreSynNet,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1776  // if (!fwrite(&dummyInt,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1777  //if (!fwrite(&numPostSynNet,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1778  // if (!fwrite(&dummyInt,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1779  if (!fwrite(&numGroups,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1780 
1782  char name[100];
1783  for (int gGrpId=0;gGrpId<numGroups;gGrpId++) {
1784  if (!fwrite(&groupConfigMDMap[gGrpId].gStartN,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1785  if (!fwrite(&groupConfigMDMap[gGrpId].gEndN,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1786 
1787  if (!fwrite(&groupConfigMap[gGrpId].grid.numX,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1788  if (!fwrite(&groupConfigMap[gGrpId].grid.numY,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1789  if (!fwrite(&groupConfigMap[gGrpId].grid.numZ,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1790 
1791  strncpy(name,groupConfigMap[gGrpId].grpName.c_str(),100);
1792  if (!fwrite(name,1,100,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1793  }
1794 
1795  if (!saveSynapseInfo) return;
1796 
1797  // Save number of local networks
1798  int net_count = 0;
1799  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
1800  if (!groupPartitionLists[netId].empty()) {
1801  net_count++;
1802  }
1803  }
1804  if (!fwrite(&net_count, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1805 
1806  // Save weights for each local network
1807  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
1808  if (!groupPartitionLists[netId].empty()) {
1809  // copy from runtimeData to managerRuntimeData
1810  fetchPreConnectionInfo(netId);
1811  fetchPostConnectionInfo(netId);
1812  fetchConnIdsLookupArray(netId);
1813  fetchSynapseState(netId);
1814 
1815 			// save the number of synapses originating from local groups
1816  int numSynToSave = 0;
1817  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
1818  if (grpIt->netId == netId) {
1819  numSynToSave += grpIt->numPostSynapses;
1820  }
1821  }
1822  if (!fwrite(&numSynToSave, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1823 			// read synapse info from managerRuntimeData
1824  int numSynSaved = 0;
1825  for (int lNId = 0; lNId < networkConfigs[netId].numNAssigned; lNId++) {
1826  unsigned int offset = managerRuntimeData.cumulativePost[lNId];
1827 
1828 				// save each synapse originating from neuron lNId
1829  for (int t = 0; t < glbNetworkConfig.maxDelay; t++) {
1830  DelayInfo dPar = managerRuntimeData.postDelayInfo[lNId*(glbNetworkConfig.maxDelay + 1)+t];
1831 
1832  for (int idx_d=dPar.delay_index_start; idx_d < (dPar.delay_index_start + dPar.delay_length); idx_d++) {
1833  SynInfo post_info = managerRuntimeData.postSynapticIds[offset + idx_d];
1834  int lNIdPost = GET_CONN_NEURON_ID(post_info);
1835  int lGrpIdPost = GET_CONN_GRP_ID(post_info);
1836  int preSynId = GET_CONN_SYN_ID(post_info);
1837  int pre_pos = managerRuntimeData.cumulativePre[lNIdPost] + preSynId;
1838  SynInfo pre_info = managerRuntimeData.preSynapticIds[pre_pos];
1839  int lNIdPre = GET_CONN_NEURON_ID(pre_info);
1840  int lGrpIdPre = GET_CONN_GRP_ID(pre_info);
1841  float weight = managerRuntimeData.wt[pre_pos];
1842  float maxWeight = managerRuntimeData.maxSynWt[pre_pos];
1843  int connId = managerRuntimeData.connIdsPreIdx[pre_pos];
1844  int delay = t+1;
1845 
1846  // convert local group id to global group id
1847  // convert local neuron id to neuron order in group
1848  int gGrpIdPre = groupConfigs[netId][lGrpIdPre].gGrpId;
1849  int gGrpIdPost = groupConfigs[netId][lGrpIdPost].gGrpId;
1850  int grpNIdPre = lNId - groupConfigs[netId][lGrpIdPre].lStartN;
1851  int grpNIdPost = lNIdPost - groupConfigs[netId][lGrpIdPost].lStartN;
1852 
1853  // we only save synapses starting from local groups since otherwise we will save external synapses twice
1854  // write order is based on function connectNeurons (no NetId & external_NetId)
1855  // inline void SNN::connectNeurons(int netId, int _grpSrc, int _grpDest, int _nSrc, int _nDest, short int _connId, float initWt, float maxWt, uint8_t delay, int externalNetId)
1856  if (groupConfigMDMap[gGrpIdPre].netId == netId) {
1857  numSynSaved++;
1858  if (!fwrite(&gGrpIdPre, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1859  if (!fwrite(&gGrpIdPost, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1860  if (!fwrite(&grpNIdPre, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1861  if (!fwrite(&grpNIdPost, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1862  if (!fwrite(&connId, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1863  if (!fwrite(&weight, sizeof(float), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1864  if (!fwrite(&maxWeight, sizeof(float), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1865  if (!fwrite(&delay, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1866  }
1867  }
1868  }
1869  }
1870  assert(numSynSaved == numSynToSave);
1871  }
1872  }
1873 
1874 
1876  //if (simMode_ == GPU_MODE)
1877  // copyWeightState(&managerRuntimeData, &runtimeData[0], cudaMemcpyDeviceToHost, false);
1879 
1881  //if (saveSynapseInfo) {
1882  // for (int i = 0; i < numN; i++) {
1883  // unsigned int offset = managerRuntimeData.cumulativePost[i];
1884 
1885  // unsigned int count = 0;
1886  // for (int t=0;t<maxDelay_;t++) {
1887  // DelayInfo dPar = managerRuntimeData.postDelayInfo[i*(maxDelay_+1)+t];
1888 
1889  // for(int idx_d=dPar.delay_index_start; idx_d<(dPar.delay_index_start+dPar.delay_length); idx_d++)
1890  // count++;
1891  // }
1892 
1893  // if (!fwrite(&count,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1894 
1895  // for (int t=0;t<maxDelay_;t++) {
1896  // DelayInfo dPar = managerRuntimeData.postDelayInfo[i*(maxDelay_+1)+t];
1897 
1898  // for(int idx_d=dPar.delay_index_start; idx_d<(dPar.delay_index_start+dPar.delay_length); idx_d++) {
1899  // // get synaptic info...
1900  // SynInfo post_info = managerRuntimeData.postSynapticIds[offset + idx_d];
1901 
1902  // // get neuron id
1903  // //int p_i = (post_info&POST_SYN_NEURON_MASK);
1904  // unsigned int p_i = GET_CONN_NEURON_ID(post_info);
1905  // assert(p_i<numN);
1906 
1907  // // get syn id
1908  // unsigned int s_i = GET_CONN_SYN_ID(post_info);
1909  // //>>POST_SYN_NEURON_BITS)&POST_SYN_CONN_MASK;
1910  // assert(s_i<(managerRuntimeData.Npre[p_i]));
1911 
1912  // // get the cumulative position for quick access...
1913  // unsigned int pos_i = managerRuntimeData.cumulativePre[p_i] + s_i;
1914 
1915  // uint8_t delay = t+1;
1916  // uint8_t plastic = s_i < managerRuntimeData.Npre_plastic[p_i]; // plastic or fixed.
1917 
1918  // if (!fwrite(&i,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1919  // if (!fwrite(&p_i,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1920  // if (!fwrite(&(managerRuntimeData.wt[pos_i]),sizeof(float),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1921  // if (!fwrite(&(managerRuntimeData.maxSynWt[pos_i]),sizeof(float),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1922  // if (!fwrite(&delay,sizeof(uint8_t),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1923  // if (!fwrite(&plastic,sizeof(uint8_t),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1924  // if (!fwrite(&(managerRuntimeData.connIdsPreIdx[pos_i]),sizeof(short int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1925  // }
1926  // }
1927  // }
1928  //}
1929 }
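// --- Editor's usage sketch (not part of the original source) ----------------
// Writes the current network (including synapse info) to a binary file whose
// name is hypothetical; the caller owns the FILE* and must close it. The
// resulting file can later be handed to loadSimulation() (see above).
static void exampleSaveSimulation(SNN& snn) {
	FILE* fid = fopen("results/sim_net.dat", "wb");
	if (fid != NULL) {
		snn.saveSimulation(fid, true); // true: also save per-synapse weights/delays
		fclose(fid);
	}
}
// -----------------------------------------------------------------------------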
1930 
1931 // writes population weights from gIDpre to gIDpost to file fname in binary
1932 //void SNN::writePopWeights(std::string fname, int grpIdPre, int grpIdPost) {
1933 // assert(grpIdPre>=0); assert(grpIdPost>=0);
1934 //
1935 // float* weights;
1936 // int matrixSize;
1937 // FILE* fid;
1938 // int numPre, numPost;
1939 // fid = fopen(fname.c_str(), "wb");
1940 // assert(fid != NULL);
1941 //
1942 // if(snnState == CONFIG_SNN || snnState == COMPILED_SNN || snnState == PARTITIONED_SNN){
1943 // KERNEL_ERROR("Simulation has not been run yet, cannot output weights.");
1944 // exitSimulation(1);
1945 // }
1946 //
1947 // SynInfo* preId;
1948 // int pre_nid, pos_ij;
1949 //
1950 // //population sizes
1951 // numPre = groupConfigs[0][grpIdPre].SizeN;
1952 // numPost = groupConfigs[0][grpIdPost].SizeN;
1953 //
1954 // //first iteration gets the number of synaptic weights to place in our
1955 // //weight matrix.
1956 // matrixSize=0;
1957 // //iterate over all neurons in the post group
1958 // for (int i=groupConfigs[0][grpIdPost].StartN; i<=groupConfigs[0][grpIdPost].EndN; i++) {
1959 // // for every post-neuron, find all pre
1960 // pos_ij = managerRuntimeData.cumulativePre[i]; // i-th neuron, j=0th synapse
1961 // //iterate over all presynaptic synapses
1962 // for(int j=0; j<managerRuntimeData.Npre[i]; pos_ij++,j++) {
1963 // preId = &managerRuntimeData.preSynapticIds[pos_ij];
1964 // pre_nid = GET_CONN_NEURON_ID((*preId)); // neuron id of pre
1965 // if (pre_nid<groupConfigs[0][grpIdPre].StartN || pre_nid>groupConfigs[0][grpIdPre].EndN)
1966 // continue; // connection does not belong to group grpIdPre
1967 // matrixSize++;
1968 // }
1969 // }
1970 //
1971 // //now we have the correct size
1972 // weights = new float[matrixSize];
1973 // //second iteration assigns the weights
1974 // int curr = 0; // iterator for return array
1975 // //iterate over all neurons in the post group
1976 // for (int i=groupConfigs[0][grpIdPost].StartN; i<=groupConfigs[0][grpIdPost].EndN; i++) {
1977 // // for every post-neuron, find all pre
1978 // pos_ij = managerRuntimeData.cumulativePre[i]; // i-th neuron, j=0th synapse
1979 // //do the GPU copy here. Copy the current weights from GPU to CPU.
1980 // if(simMode_==GPU_MODE){
1981 // copyWeightsGPU(i,grpIdPre);
1982 // }
1983 // //iterate over all presynaptic synapses
1984 // for(int j=0; j<managerRuntimeData.Npre[i]; pos_ij++,j++) {
1985 // preId = &(managerRuntimeData.preSynapticIds[pos_ij]);
1986 // pre_nid = GET_CONN_NEURON_ID((*preId)); // neuron id of pre
1987 // if (pre_nid<groupConfigs[0][grpIdPre].StartN || pre_nid>groupConfigs[0][grpIdPre].EndN)
1988 // continue; // connection does not belong to group grpIdPre
1989 // weights[curr] = managerRuntimeData.wt[pos_ij];
1990 // curr++;
1991 // }
1992 // }
1993 //
1994 // fwrite(weights,sizeof(float),matrixSize,fid);
1995 // fclose(fid);
1996 // //Let my memory FREE!!!
1997 // delete [] weights;
1998 //}
1999 
2000 
2004 
2005 // set new file pointer for all files
2006 // fp==NULL means: leave that file pointer unchanged
2007 // can be called in all logger modes; however, the analogous interface function can only be called in CUSTOM
2008 void SNN::setLogsFp(FILE* fpInf, FILE* fpErr, FILE* fpDeb, FILE* fpLog) {
2009  if (fpInf!=NULL) {
2010  if (fpInf_!=NULL && fpInf_!=stdout && fpInf_!=stderr)
2011  fclose(fpInf_);
2012  fpInf_ = fpInf;
2013  }
2014 
2015  if (fpErr!=NULL) {
2016  if (fpErr_ != NULL && fpErr_!=stdout && fpErr_!=stderr)
2017  fclose(fpErr_);
2018  fpErr_ = fpErr;
2019  }
2020 
2021  if (fpDeb!=NULL) {
2022  if (fpDeb_!=NULL && fpDeb_!=stdout && fpDeb_!=stderr)
2023  fclose(fpDeb_);
2024  fpDeb_ = fpDeb;
2025  }
2026 
2027  if (fpLog!=NULL) {
2028  if (fpLog_!=NULL && fpLog_!=stdout && fpLog_!=stderr)
2029  fclose(fpLog_);
2030  fpLog_ = fpLog;
2031  }
2032 }
2033 
2034 
2038 
2039 // loop over connection config map entries to find a connection with the right pre-post pair, O(N)
2040 short int SNN::getConnectId(int grpIdPre, int grpIdPost) {
2041  short int connId = -1;
2042 
2043  for (std::map<int, ConnectConfig>::iterator it = connectConfigMap.begin(); it != connectConfigMap.end(); it++) {
2044  if (it->second.grpSrc == grpIdPre && it->second.grpDest == grpIdPost) {
2045  connId = it->second.connId;
2046  break;
2047  }
2048  }
2049 
2050  return connId;
2051 }
2052 
2053 ConnectConfig SNN::getConnectConfig(short int connId) {
2054 	CHECK_CONNECTION_ID(connId, numConnections);
2055 
2056  if (connectConfigMap.find(connId) == connectConfigMap.end()) {
2057  KERNEL_ERROR("Total Connections = %d", numConnections);
2058  KERNEL_ERROR("ConnectId (%d) cannot be recognized", connId);
2059  }
2060 
2061  return connectConfigMap[connId];
2062 }
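// --- Editor's usage sketch (not part of the original source) ----------------
// Looks up a connection by its (pre, post) group pair and inspects its config.
// getConnectId() returns -1 if no such connection exists, so the result must
// be checked before calling getConnectConfig().
static void exampleConnectLookup(SNN& snn, int gGrpIdPre, int gGrpIdPost) {
	short int connId = snn.getConnectId(gGrpIdPre, gGrpIdPost);
	if (connId >= 0) {
		ConnectConfig cfg = snn.getConnectConfig(connId);
		float maxWt = cfg.maxWt; // upper weight bound configured for this connection
		(void)maxWt;
	}
}
// -----------------------------------------------------------------------------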
2063 
2064 std::vector<float> SNN::getConductanceAMPA(int gGrpId) {
2065  assert(isSimulationWithCOBA());
2066 
2067  // copy data to the manager runtime
2068  fetchConductanceAMPA(gGrpId);
2069 
2070  std::vector<float> gAMPAvec;
2071  for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
2072  gAMPAvec.push_back(managerRuntimeData.gAMPA[gNId]);
2073  }
2074  return gAMPAvec;
2075 }
2076 
2077 std::vector<float> SNN::getConductanceNMDA(int gGrpId) {
2078 #ifdef LN_I_CALC_TYPES
2079  assert(groupConfigMap[gGrpId].icalcType == COBA);
2080 #else
2081  assert(isSimulationWithCOBA());
2082 #endif
2083 
2084  // copy data to the manager runtime
2085  fetchConductanceNMDA(gGrpId);
2086 
2087  std::vector<float> gNMDAvec;
2088 #ifdef LN_I_CALC_TYPES
2089  if(groupConfigMap[gGrpId].with_NMDA_rise) {
2090 #else
2091  if (isSimulationWithNMDARise()) {
2092 #endif
2093  // need to construct conductance from rise and decay parts
2094  for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
2095  gNMDAvec.push_back(managerRuntimeData.gNMDA_d[gNId] - managerRuntimeData.gNMDA_r[gNId]);
2096  }
2097  } else {
2098  for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
2099  gNMDAvec.push_back(managerRuntimeData.gNMDA[gNId]);
2100  }
2101  }
2102  return gNMDAvec;
2103 }
2104 
2105 std::vector<float> SNN::getConductanceGABAa(int gGrpId) {
2106  assert(isSimulationWithCOBA());
2107 
2108  // copy data to the manager runtime
2109  fetchConductanceGABAa(gGrpId);
2110 
2111  std::vector<float> gGABAaVec;
2112  for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
2113  gGABAaVec.push_back(managerRuntimeData.gGABAa[gNId]);
2114  }
2115  return gGABAaVec;
2116 }
2117 
2118 std::vector<float> SNN::getConductanceGABAb(int gGrpId) {
2119 #ifdef LN_I_CALC_TYPES
2120  assert(groupConfigMap[gGrpId].icalcType == COBA);
2121 #else
2122  assert(isSimulationWithCOBA());
2123 #endif
2124 
2125  // copy data to the manager runtime
2126  fetchConductanceGABAb(gGrpId);
2127 
2128  std::vector<float> gGABAbVec;
2129 #ifdef LN_I_CALC_TYPES
2130  if (groupConfigMap[gGrpId].with_GABAb_rise) {
2131 #else
2132  if (isSimulationWithGABAbRise()) {
2133 #endif
2134  // need to construct conductance from rise and decay parts
2135  for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
2136  gGABAbVec.push_back(managerRuntimeData.gGABAb_d[gNId] - managerRuntimeData.gGABAb_r[gNId]);
2137  }
2138  } else {
2139  for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
2140  gGABAbVec.push_back(managerRuntimeData.gGABAb[gNId]);
2141  }
2142  }
2143  return gGABAbVec;
2144 }
2145 
2146 // returns RangeDelay struct of a connection
2147 RangeDelay SNN::getDelayRange(short int connId) {
2148  assert(connId>=0 && connId<numConnections);
2149 
2150  return RangeDelay(connectConfigMap[connId].minDelay, connectConfigMap[connId].maxDelay);
2151 }
2152 
2153 // \TODO: bad API design (returns allocated memory to the user), consider moving this function to connection monitor
2154 uint8_t* SNN::getDelays(int gGrpIdPre, int gGrpIdPost, int& numPreN, int& numPostN) {
2155  int netIdPost = groupConfigMDMap[gGrpIdPost].netId;
2156  int lGrpIdPost = groupConfigMDMap[gGrpIdPost].lGrpId;
2157  int lGrpIdPre = -1;
2158  uint8_t* delays;
2159 
2160  for (int lGrpId = 0; lGrpId < networkConfigs[netIdPost].numGroupsAssigned; lGrpId++)
2161  if (groupConfigs[netIdPost][lGrpId].gGrpId == gGrpIdPre) {
2162  lGrpIdPre = lGrpId;
2163  break;
2164  }
2165  assert(lGrpIdPre != -1);
2166 
2167  numPreN = groupConfigMap[gGrpIdPre].numN;
2168  numPostN = groupConfigMap[gGrpIdPost].numN;
2169 
2170  delays = new uint8_t[numPreN * numPostN];
2171  memset(delays, 0, numPreN * numPostN);
2172 
2173  fetchPostConnectionInfo(netIdPost);
2174 
2175  for (int lNIdPre = groupConfigs[netIdPost][lGrpIdPre].lStartN; lNIdPre <= groupConfigs[netIdPost][lGrpIdPre].lEndN; lNIdPre++) { // FIXED LN 2022 the end is an index as well
2176  unsigned int offset = managerRuntimeData.cumulativePost[lNIdPre];
2177 
2178  for (int t = 0; t < glbNetworkConfig.maxDelay; t++) {
2179  DelayInfo dPar = managerRuntimeData.postDelayInfo[lNIdPre * (glbNetworkConfig.maxDelay + 1) + t];
2180 
2181  for(int idx_d = dPar.delay_index_start; idx_d<(dPar.delay_index_start+dPar.delay_length); idx_d++) {
2182  // get synaptic info...
2183  SynInfo postSynInfo = managerRuntimeData.postSynapticIds[offset + idx_d];
2184 
2185  // get local post neuron id
2186  int lNIdPost = GET_CONN_NEURON_ID(postSynInfo);
2187  assert(lNIdPost < glbNetworkConfig.numN);
2188 
2189  if (lNIdPost >= groupConfigs[netIdPost][lGrpIdPost].lStartN && lNIdPost <= groupConfigs[netIdPost][lGrpIdPost].lEndN) {
2190  delays[(lNIdPre - groupConfigs[netIdPost][lGrpIdPre].lStartN) + numPreN * (lNIdPost - groupConfigs[netIdPost][lGrpIdPost].lStartN)] = t + 1;
2191  }
2192  }
2193  }
2194  }
2195  return delays;
2196 }
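// --- Editor's usage sketch (not part of the original source) ----------------
// Retrieves the full pre x post delay matrix between two hypothetical groups.
// An entry of 0 means "no synapse"; otherwise the entry is the delay in ms.
// The caller owns the returned buffer and must delete[] it (see the \TODO on
// the API design above).
static void exampleGetDelays(SNN& snn, int gGrpIdPre, int gGrpIdPost) {
	int numPre = 0, numPost = 0;
	uint8_t* delays = snn.getDelays(gGrpIdPre, gGrpIdPost, numPre, numPost);
	uint8_t d00 = delays[0 + numPre * 0]; // delay from pre-neuron 0 to post-neuron 0
	(void)d00;
	delete[] delays;
}
// -----------------------------------------------------------------------------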
2197 
2198 Grid3D SNN::getGroupGrid3D(int gGrpId) {
2199 	assert(gGrpId >= 0 && gGrpId < numGroups);
2200 
2201  return groupConfigMap[gGrpId].grid;
2202 }
2203 
2204 // find ID of group with name grpName
2205 int SNN::getGroupId(std::string grpName) {
2206  int grpId = -1;
2207  for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
2208  if (groupConfigMap[gGrpId].grpName.compare(grpName) == 0) {
2209  grpId = gGrpId;
2210  break;
2211  }
2212  }
2213 
2214  return grpId;
2215 }
2216 
2217 std::string SNN::getGroupName(int gGrpId) {
2218  assert(gGrpId >= -1 && gGrpId < numGroups);
2219 
2220  if (gGrpId == ALL)
2221  return "ALL";
2222 
2223  return groupConfigMap[gGrpId].grpName;
2224 }
2225 
2226 ConnSTDPInfo SNN::getConnSTDPInfo(short int connId) {
2227 	ConnSTDPInfo cInfo;
2228 
2229  cInfo.WithSTDP = connectConfigMap[connId].stdpConfig.WithSTDP;
2230  cInfo.WithESTDP = connectConfigMap[connId].stdpConfig.WithESTDP;
2231  cInfo.WithISTDP = connectConfigMap[connId].stdpConfig.WithISTDP;
2232  cInfo.WithESTDPtype = connectConfigMap[connId].stdpConfig.WithESTDPtype;
2233  cInfo.WithISTDPtype = connectConfigMap[connId].stdpConfig.WithISTDPtype;
2234  cInfo.WithESTDPcurve = connectConfigMap[connId].stdpConfig.WithESTDPcurve;
2235  cInfo.WithISTDPcurve = connectConfigMap[connId].stdpConfig.WithISTDPcurve;
2236  cInfo.ALPHA_MINUS_EXC = connectConfigMap[connId].stdpConfig.ALPHA_MINUS_EXC;
2237  cInfo.ALPHA_PLUS_EXC = connectConfigMap[connId].stdpConfig.ALPHA_PLUS_EXC;
2238  cInfo.TAU_MINUS_INV_EXC = connectConfigMap[connId].stdpConfig.TAU_MINUS_INV_EXC;
2239  cInfo.TAU_PLUS_INV_EXC = connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_EXC;
2240  cInfo.ALPHA_MINUS_INB = connectConfigMap[connId].stdpConfig.ALPHA_MINUS_INB;
2241  cInfo.ALPHA_PLUS_INB = connectConfigMap[connId].stdpConfig.ALPHA_PLUS_INB;
2242  cInfo.TAU_MINUS_INV_INB = connectConfigMap[connId].stdpConfig.TAU_MINUS_INV_INB;
2243  cInfo.TAU_PLUS_INV_INB = connectConfigMap[connId].stdpConfig.TAU_PLUS_INV_INB;
2244  cInfo.GAMMA = connectConfigMap[connId].stdpConfig.GAMMA;
2245  cInfo.BETA_LTP = connectConfigMap[connId].stdpConfig.BETA_LTP;
2246  cInfo.BETA_LTD = connectConfigMap[connId].stdpConfig.BETA_LTD;
2247  cInfo.LAMBDA = connectConfigMap[connId].stdpConfig.LAMBDA;
2248  cInfo.DELTA = connectConfigMap[connId].stdpConfig.DELTA;
2249 
2250  return cInfo;
2251 }
2252 
2253 GroupNeuromodulatorInfo SNN::getGroupNeuromodulatorInfo(int gGrpId) {
2254 	GroupNeuromodulatorInfo gInfo;
2255 
2256  gInfo.baseDP = groupConfigMap[gGrpId].neuromodulatorConfig.baseDP;
2257  gInfo.base5HT = groupConfigMap[gGrpId].neuromodulatorConfig.base5HT;
2258  gInfo.baseACh = groupConfigMap[gGrpId].neuromodulatorConfig.baseACh;
2259  gInfo.baseNE = groupConfigMap[gGrpId].neuromodulatorConfig.baseNE;
2260 
2261  gInfo.decayDP = groupConfigMap[gGrpId].neuromodulatorConfig.decayDP;
2262  gInfo.decay5HT = groupConfigMap[gGrpId].neuromodulatorConfig.decay5HT;
2263  gInfo.decayACh = groupConfigMap[gGrpId].neuromodulatorConfig.decayACh;
2264  gInfo.decayNE = groupConfigMap[gGrpId].neuromodulatorConfig.decayNE;
2265 
2266  gInfo.releaseDP = groupConfigMap[gGrpId].neuromodulatorConfig.releaseDP;
2267  gInfo.release5HT = groupConfigMap[gGrpId].neuromodulatorConfig.release5HT;
2268  gInfo.releaseACh = groupConfigMap[gGrpId].neuromodulatorConfig.releaseACh;
2269  gInfo.releaseNE = groupConfigMap[gGrpId].neuromodulatorConfig.releaseNE;
2270 
2271  gInfo.activeDP = groupConfigMap[gGrpId].neuromodulatorConfig.activeDP;
2272  gInfo.active5HT = groupConfigMap[gGrpId].neuromodulatorConfig.active5HT;
2273  gInfo.activeACh = groupConfigMap[gGrpId].neuromodulatorConfig.activeACh;
2274  gInfo.activeNE = groupConfigMap[gGrpId].neuromodulatorConfig.activeNE;
2275 
2276  return gInfo;
2277 }
2278 
2279 Point3D SNN::getNeuronLocation3D(int gNId) {
2280 	int gGrpId = -1;
2281  assert(gNId >= 0 && gNId < glbNetworkConfig.numN);
2282 
2283  // search for global group id
2284  for (std::map<int, GroupConfigMD>::iterator grpIt = groupConfigMDMap.begin(); grpIt != groupConfigMDMap.end(); grpIt++) {
2285  if (gNId >= grpIt->second.gStartN && gNId <= grpIt->second.gEndN)
2286  gGrpId = grpIt->second.gGrpId;
2287  }
2288 
2289  // adjust neurId for neuron ID of first neuron in the group
2290  int neurId = gNId - groupConfigMDMap[gGrpId].gStartN;
2291 
2292  return getNeuronLocation3D(gGrpId, neurId);
2293 }
2294 
2295 Point3D SNN::getNeuronLocation3D(int gGrpId, int relNeurId) {
2296  Grid3D grid = groupConfigMap[gGrpId].grid;
2297  assert(gGrpId >= 0 && gGrpId < numGroups);
2298  assert(relNeurId >= 0 && relNeurId < getGroupNumNeurons(gGrpId));
2299 
2300  int intX = relNeurId % grid.numX;
2301  int intY = (relNeurId / grid.numX) % grid.numY;
2302  int intZ = relNeurId / (grid.numX * grid.numY);
2303 
2304 	// get coordinates centered around the origin
2305  double coordX = grid.distX * intX + grid.offsetX;
2306  double coordY = grid.distY * intY + grid.offsetY;
2307  double coordZ = grid.distZ * intZ + grid.offsetZ;
2308  return Point3D(coordX, coordY, coordZ);
2309 }
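// --- Editor's worked example (not part of the original source) --------------
// For a hypothetical Grid3D of 3x3x1 neurons with unit spacing and zero
// offsets, relNeurId 4 maps to intX = 4 % 3 = 1, intY = (4 / 3) % 3 = 1,
// intZ = 4 / (3*3) = 0, i.e. the Point3D (1.0, 1.0, 0.0).
static Point3D exampleNeuronLocation(SNN& snn, int gGrpId) {
	return snn.getNeuronLocation3D(gGrpId, 4);
}
// -----------------------------------------------------------------------------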
2310 
2311 
2312 // LN DEBUG gaussian problem
2313 // TODO check latest CARLsim 4.1 and 5.0
2314 
2315 int SNN::getNeuronId(int gGrpId, Point3D location) {
2316 
2317  Grid3D grid = groupConfigMap[gGrpId].grid;
2318  assert(gGrpId >= 0 && gGrpId < numGroups);
2319 
2320 	// translate coordinates (centered around the origin) back to grid indices
2321  int intX = round((location.x - grid.offsetX) / grid.distX); // location.x = grid.distX * intX + grid.offsetX;
2322  int intY = round((location.y - grid.offsetY) / grid.distY); // location.y = grid.distY * intY + grid.offsetY;
2323  int intZ = round((location.z - grid.offsetZ) / grid.distZ); // location.z = grid.distZ * intZ + grid.offsetZ;
2324 
2325 // int intX = roundf((location.x - grid.offsetX - 1) / grid.distX - (-(grid.numX - 1) / 2)); // location.x = grid.distX * intX + grid.offsetX;
2326 // int intY = roundf((location.y - grid.offsetY - 1) / grid.distY - (-(grid.numY - 1) / 2)); ; // location.y = grid.distY * intY + grid.offsetY;
2327 // int intZ = roundf((location.z - grid.offsetZ - 1) / grid.distZ - (-(grid.numZ - 1) / 2)); ; // location.z = grid.distZ * intZ + grid.offsetZ;
2328 
2329 // int intX = roundf(location.x + (grid.numX - 1) / 2); // location.x = grid.distX * intX + grid.offsetX;
2330 // int intY = roundf(location.y + (grid.numY - 1) / 2); // location.y = grid.distY * intY + grid.offsetY;
2331 // int intZ = roundf(location.z + (grid.numZ - 1) / 2); // location.z = grid.distZ * intZ + grid.offsetZ;
2332 
2333 // printf("intX=%d, intY=%d, intZ=%d\n", intX, intY, intZ);
2334 
2335 	// solve for the relNeurId that fulfills:
2336  // intX = relNeurId % grid.numX;
2337  // intY = (relNeurId / grid.numX) % grid.numY;
2338  // intZ = relNeurId / (grid.numX * grid.numY);
2339  //int relNeurId = intX + intY * grid.numX + intY + intZ * (grid.numX * grid.numY) + intZ;
2340  int relNeurId = intX + intY * grid.numX + intZ * (grid.numX * grid.numY);
2341  assert(relNeurId >= 0 && relNeurId < getGroupNumNeurons(gGrpId));
2342 
2343 /*
2344  Nx = 2
2345  [-(Nx-1)/2, (Nx-1)/2]
2346 
2347  x_min = -(Nx-1)/2 = -(2-1)/2 = -0.5
2348 
2349  = offset
2350 */
2351 
2352  return relNeurId;
2353 }
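// --- Editor's usage sketch (not part of the original source) ----------------
// Round trip between the two coordinate helpers above: converting a neuron's
// 3D location back to its index should recover the original relNeurId
// (assuming the location lies exactly on the grid).
static void exampleNeuronIdRoundTrip(SNN& snn, int gGrpId, int relNeurId) {
	Point3D loc = snn.getNeuronLocation3D(gGrpId, relNeurId);
	assert(snn.getNeuronId(gGrpId, loc) == relNeurId);
}
// -----------------------------------------------------------------------------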
2354 
2355 
2356 // returns the number of synaptic connections associated with this connection.
2357 int SNN::getNumSynapticConnections(short int connId) {
2358  //we didn't find the connection.
2359  if (connectConfigMap.find(connId) == connectConfigMap.end()) {
2360 		KERNEL_ERROR("Connection ID was not found. Quitting.");
2361 		exitSimulation(1);
2362 	}
2363 
2364  return connectConfigMap[connId].numberOfConnections;
2365 }
2366 
2367 // returns pointer to existing SpikeMonitor object, NULL else
2368 SpikeMonitor* SNN::getSpikeMonitor(int gGrpId) {
2369 	assert(gGrpId >= 0 && gGrpId < getNumGroups());
2370 
2371  if (groupConfigMDMap[gGrpId].spikeMonitorId >= 0) {
2372  return spikeMonList[(groupConfigMDMap[gGrpId].spikeMonitorId)];
2373  } else {
2374  return NULL;
2375  }
2376 }
2377 
2378 SpikeMonitorCore* SNN::getSpikeMonitorCore(int gGrpId) {
2379 	assert(gGrpId >= 0 && gGrpId < getNumGroups());
2380 
2381  if (groupConfigMDMap[gGrpId].spikeMonitorId >= 0) {
2382  return spikeMonCoreList[(groupConfigMDMap[gGrpId].spikeMonitorId)];
2383  } else {
2384  return NULL;
2385  }
2386 }
2387 
2388 // returns pointer to existing NeuronMonitor object, NULL else
2389 NeuronMonitor* SNN::getNeuronMonitor(int gGrpId) {
2390 	assert(gGrpId >= 0 && gGrpId < getNumGroups());
2391 
2392  if (groupConfigMDMap[gGrpId].neuronMonitorId >= 0) {
2393  return neuronMonList[(groupConfigMDMap[gGrpId].neuronMonitorId)];
2394  }
2395  else {
2396  return NULL;
2397  }
2398 }
2399 
2400 NeuronMonitorCore* SNN::getNeuronMonitorCore(int gGrpId) {
2401 	assert(gGrpId >= 0 && gGrpId < getNumGroups());
2402 
2403  if (groupConfigMDMap[gGrpId].neuronMonitorId >= 0) {
2404  return neuronMonCoreList[(groupConfigMDMap[gGrpId].neuronMonitorId)];
2405  }
2406  else {
2407  return NULL;
2408  }
2409 }
2410 
2411 RangeWeight SNN::getWeightRange(short int connId) {
2412 	assert(connId>=0 && connId<numConnections);
2413 
2414  return RangeWeight(0.0f, connectConfigMap[connId].initWt, connectConfigMap[connId].maxWt);
2415 }
2416 
2417 
2421 
2422 // all unsafe operations of SNN constructor
2423 void SNN::SNNinit() {
2424  // initialize snnState
2425  snnState = CONFIG_SNN;
2426 
2427  // set logger mode (defines where to print all status, error, and debug messages)
2428  switch (loggerMode_) {
2429  case USER:
2430  fpInf_ = stdout;
2431  fpErr_ = stderr;
2432  #if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
2433  fpDeb_ = fopen("nul","w");
2434  #else
2435  fpDeb_ = fopen("/dev/null","w");
2436  #endif
2437  break;
2438  case DEVELOPER:
2439  fpInf_ = stdout;
2440  fpErr_ = stderr;
2441  fpDeb_ = stdout;
2442  break;
2443  case SHOWTIME:
2444  #if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
2445  fpInf_ = fopen("nul","w");
2446  #else
2447  fpInf_ = fopen("/dev/null","w");
2448  #endif
2449  fpErr_ = stderr;
2450  #if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
2451  fpDeb_ = fopen("nul","w");
2452  #else
2453  fpDeb_ = fopen("/dev/null","w");
2454  #endif
2455  break;
2456  case SILENT:
2457  case CUSTOM:
2458  #if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
2459  fpInf_ = fopen("nul","w");
2460  fpErr_ = fopen("nul","w");
2461  fpDeb_ = fopen("nul","w");
2462  #else
2463  fpInf_ = fopen("/dev/null","w");
2464  fpErr_ = fopen("/dev/null","w");
2465  fpDeb_ = fopen("/dev/null","w");
2466  #endif
2467  break;
2468  default:
2469  fpErr_ = stderr; // need to open file stream first
2470  KERNEL_ERROR("Unknown logger mode");
2471  exit(UNKNOWN_LOGGER_ERROR);
2472 
2473  }
2474 
2475  // try to open log file in results folder: create if not exists
2476 #if defined(WIN32) || defined(WIN64)
2477  CreateDirectory("results", NULL);
2478  fpLog_ = fopen("results/carlsim.log", "w");
2479 #else
2480  struct stat sb;
2481  int createDir = 1;
2482  if (stat("results", &sb) == -1 || !S_ISDIR(sb.st_mode)) {
2483  // results dir does not exist, try to create:
2484  createDir = mkdir("results", 0777);
2485  }
2486 
2487  if (createDir == -1) {
2488  // tried to create dir, but failed
2489  fprintf(stderr, "Could not create directory \"results/\", which is required to "
2490  "store simulation results. Aborting simulation...\n");
2491  exit(NO_LOGGER_DIR_ERROR);
2492  } else {
2493  // open log file
2494  fpLog_ = fopen("results/carlsim.log", "w");
2495 
2496  if (createDir == 0) {
2497  // newly created dir: now that fpLog_/fpInf_ exist, inform user
2498  KERNEL_INFO("Created results directory \"results/\".");
2499  }
2500  }
2501 #endif
2502  if (fpLog_ == NULL) {
2503  fprintf(stderr, "Could not create the directory \"results/\" or the log file \"results/carlsim.log\""
2504  ", which is required to store simulation results. Aborting simulation...\n");
2505  exit(NO_LOGGER_DIR_ERROR);
2506  }
2507 
2508  KERNEL_INFO("*********************************************************************************");
2509 	KERNEL_INFO("******************** Welcome to CARLsim %d.%d ***************************",
2510 		MAJOR_VERSION, MINOR_VERSION);
2511  KERNEL_INFO("*********************************************************************************\n");
2512 
2513  KERNEL_INFO("***************************** Configuring Network ********************************");
2514  KERNEL_INFO("Starting CARLsim simulation \"%s\" in %s mode",networkName_.c_str(),
2515  loggerMode_string[loggerMode_]);
2516  KERNEL_INFO("Random number seed: %d",randSeed_);
2517 
2518  time_t rawtime;
2519  struct tm * timeinfo;
2520  time(&rawtime);
2521  timeinfo = localtime(&rawtime);
2522  KERNEL_DEBUG("Current local time and date: %s", asctime(timeinfo));
2523 
2524  // init random seed
2525  srand48(randSeed_);
2526 
2527  simTimeRunStart = 0; simTimeRunStop = 0;
2528  simTimeLastRunSummary = 0;
2529  simTimeMs = 0; simTimeSec = 0; simTime = 0;
2530 
2531  numGroups = 0;
2532  numConnections = 0;
2533  numCompartmentConnections = 0;
2534  numSpikeGenGrps = 0;
2535  simulatorDeleted = false;
2536 
2537  cumExecutionTime = 0.0f;
2538  executionTime = 0.0f;
2539  prevExecutionTime = 0.0f; // FIX 2022: Wrong display in Debug Mode
2540 
2541  spikeRateUpdated = false;
2542  numSpikeMonitor = 0;
2543  numNeuronMonitor = 0;
2544  numGroupMonitor = 0;
2545  numConnectionMonitor = 0;
2546 
2547  sim_with_compartments = false;
2548  sim_with_fixedwts = true; // default is true, will be set to false if there are any plastic synapses
2549 #define LN_I_CALC_TYPES__REQUIRED_FOR_NETWORK_LEVEL
2550  sim_with_conductances = false; // default is false
2551  sim_with_stdp = false;
2552  sim_with_modulated_stdp = false;
2553  sim_with_homeostasis = false;
2554  sim_with_stp = false;
2555  sim_in_testing = false;
2556 
2557  loadSimFID = NULL;
2558 
2559 #define LN_I_CALC_TYPES__REQUIRED_FOR_NETWORK_LEVEL
2560  // conductance info struct for simulation
2561  sim_with_NMDA_rise = false;
2562  sim_with_GABAb_rise = false;
2563 #define LN_I_CALC_TYPES__REQUIRED_FOR_NETWORK_LEVEL
2564 #ifndef LN_I_CALC_TYPES
2565  dAMPA = 1.0-1.0/5.0; // some default decay and rise times
2566  rNMDA = 1.0-1.0/10.0;
2567  dNMDA = 1.0-1.0/150.0;
2568  sNMDA = 1.0;
2569  dGABAa = 1.0-1.0/6.0;
2570  rGABAb = 1.0-1.0/100.0;
2571  dGABAb = 1.0-1.0/150.0;
2572  sGABAb = 1.0;
2573 #endif
2574  // default integration method: Forward-Euler with 0.5ms integration step
2576 
2577  mulSynFast = NULL;
2578  mulSynSlow = NULL;
2579 
2580  // reset all monitors, don't deallocate (false)
2581  resetMonitors(false);
2582 
2583  resetGroupConfigs(false);
2584 
2585  resetConnectionConfigs(false);
2586 
2587  // initialize spike buffer
2588  spikeBuf = new SpikeBuffer(0, MAX_TIME_SLICE);
2589 
2590  memset(networkConfigs, 0, sizeof(NetworkConfigRT) * MAX_NET_PER_SNN);
2591 
2592  // reset all runtime data
2593  // GPU/CPU runtime data
2594  memset(runtimeData, 0, sizeof(RuntimeData) * MAX_NET_PER_SNN);
2595  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) // FIXME: redundant?? //LN2021 Yes, 0 --> false
2596  runtimeData[netId].allocated = false;
2597 
2598  // Manager runtime data
2599  memset(&managerRuntimeData, 0, sizeof(RuntimeData));
2600  managerRuntimeData.allocated = false; // FIXME: redundant??
2601 
2602  // default weight update parameter
2603  wtANDwtChangeUpdateInterval_ = 1000; // update weights every 1000 ms (default)
2604  wtANDwtChangeUpdateIntervalCnt_ = 0; // helper var to implement fast modulo
2605  stdpScaleFactor_ = 1.0f;
2606  wtChangeDecay_ = 0.0f;
2607 
2608  // FIXME: use it when necessary
2609 #ifndef __NO_CUDA__
2610  CUDA_CREATE_TIMER(timer);
2611  CUDA_RESET_TIMER(timer);
2612 #endif
2613 }
2614 
2615 void SNN::advSimStep() {
2616  doSTPUpdateAndDecayCond();
2617 
2618  //KERNEL_INFO("STPUpdate!");
2619 
2620  spikeGeneratorUpdate();
2621 
2622  //KERNEL_INFO("spikeGeneratorUpdate!");
2623 
2624  findFiring();
2625 
2626  //KERNEL_INFO("Find firing!");
2627 
2628  updateTimingTable();
2629 
2630  routeSpikes();
2631 
2632  doCurrentUpdate();
2633 
2634  //KERNEL_INFO("doCurrentUpdate!");
2635 
2636  globalStateUpdate();
2637 
2638  //KERNEL_INFO("globalStateUpdate!");
2639 
2640  clearExtFiringTable();
2641 }
2642 
2643 void SNN::doSTPUpdateAndDecayCond() {
2644  #ifndef __NO_PTHREADS__ // POSIX
2645 	pthread_t threads[numCores + 1]; // +1 so the array is non-empty even if numCores == 0 (works, but bad practice)
2646  cpu_set_t cpus;
2647  ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
2648  int threadCount = 0;
2649  #endif
2650 
2651  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
2652  if (!groupPartitionLists[netId].empty()) {
2653  assert(runtimeData[netId].allocated);
2654  if (netId < CPU_RUNTIME_BASE) // GPU runtime
2655  doSTPUpdateAndDecayCond_GPU(netId);
2656  else{//CPU runtime
2657  #ifdef __NO_PTHREADS__
2658  doSTPUpdateAndDecayCond_CPU(netId);
2659  #else // Linux or MAC
2660  pthread_attr_t attr;
2661  pthread_attr_init(&attr);
2662  CPU_ZERO(&cpus);
2663  CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
2664  pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);
2665 
2666  argsThreadRoutine[threadCount].snn_pointer = this;
2667  argsThreadRoutine[threadCount].netId = netId;
2668  argsThreadRoutine[threadCount].lGrpId = 0;
2669  argsThreadRoutine[threadCount].startIdx = 0;
2670  argsThreadRoutine[threadCount].endIdx = 0;
2671  argsThreadRoutine[threadCount].GtoLOffset = 0;
2672 
2673  pthread_create(&threads[threadCount], &attr, &SNN::helperDoSTPUpdateAndDecayCond_CPU, (void*)&argsThreadRoutine[threadCount]);
2674  pthread_attr_destroy(&attr);
2675  threadCount++;
2676  #endif
2677  }
2678  }
2679  }
2680 
2681  #ifndef __NO_PTHREADS__ // POSIX
2682  // join all the threads
2683  for (int i=0; i<threadCount; i++){
2684  pthread_join(threads[i], NULL);
2685  }
2686  #endif
2687 }
2688 
2689 void SNN::spikeGeneratorUpdate() {
2690  // If poisson rate has been updated, assign new poisson rate
2691  if (spikeRateUpdated) {
2692  #ifndef __NO_PTHREADS__ // POSIX
2693 		pthread_t threads[numCores + 1]; // +1 so the array is non-empty even if numCores == 0 (works, but bad practice)
2694  cpu_set_t cpus;
2695  ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
2696  int threadCount = 0;
2697  #endif
2698 
2699  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
2700  if (!groupPartitionLists[netId].empty()) {
2701  if (netId < CPU_RUNTIME_BASE) // GPU runtime
2702  assignPoissonFiringRate_GPU(netId);
2703  else{ // CPU runtime
2704  #ifdef __NO_PTHREADS__
2705  assignPoissonFiringRate_CPU(netId);
2706  #else // Linux or MAC
2707  pthread_attr_t attr;
2708  pthread_attr_init(&attr);
2709  CPU_ZERO(&cpus);
2710  CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
2711  pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);
2712 
2713  argsThreadRoutine[threadCount].snn_pointer = this;
2714  argsThreadRoutine[threadCount].netId = netId;
2715  argsThreadRoutine[threadCount].lGrpId = 0;
2716  argsThreadRoutine[threadCount].startIdx = 0;
2717  argsThreadRoutine[threadCount].endIdx = 0;
2718  argsThreadRoutine[threadCount].GtoLOffset = 0;
2719 
2720  pthread_create(&threads[threadCount], &attr, &SNN::helperAssignPoissonFiringRate_CPU, (void*)&argsThreadRoutine[threadCount]);
2721  pthread_attr_destroy(&attr);
2722  threadCount++;
2723  #endif
2724  }
2725  }
2726  }
2727 
2728  #ifndef __NO_PTHREADS__ // POSIX
2729  // join all the threads
2730  for (int i=0; i<threadCount; i++){
2731  pthread_join(threads[i], NULL);
2732  }
2733  #endif
2734 
2735  spikeRateUpdated = false;
2736  }
2737 
2738 	// If the time slice has expired, check whether new spikes need to be generated by user-defined spike generators
2739  generateUserDefinedSpikes();
2740 
2741  #ifndef __NO_PTHREADS__ // POSIX
2742 	pthread_t threads[numCores + 1]; // +1 so the array is non-empty even if numCores == 0 (works, but bad practice)
2743  cpu_set_t cpus;
2744  ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
2745  int threadCount = 0;
2746  #endif
2747 
2748  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
2749  if (!groupPartitionLists[netId].empty()) {
2750  if (netId < CPU_RUNTIME_BASE) // GPU runtime
2751  spikeGeneratorUpdate_GPU(netId);
2752  else{ // CPU runtime
2753  #ifdef __NO_PTHREADS__
2754  spikeGeneratorUpdate_CPU(netId);
2755  #else // Linux or MAC
2756  pthread_attr_t attr;
2757  pthread_attr_init(&attr);
2758  CPU_ZERO(&cpus);
2759  CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
2760  pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);
2761 
2762  argsThreadRoutine[threadCount].snn_pointer = this;
2763  argsThreadRoutine[threadCount].netId = netId;
2764  argsThreadRoutine[threadCount].lGrpId = 0;
2765  argsThreadRoutine[threadCount].startIdx = 0;
2766  argsThreadRoutine[threadCount].endIdx = 0;
2767  argsThreadRoutine[threadCount].GtoLOffset = 0;
2768 
2769  pthread_create(&threads[threadCount], &attr, &SNN::helperSpikeGeneratorUpdate_CPU, (void*)&argsThreadRoutine[threadCount]);
2770  pthread_attr_destroy(&attr);
2771  threadCount++;
2772  #endif
2773  }
2774  }
2775  }
2776 
2777  #ifndef __NO_PTHREADS__ // POSIX
2778  // join all the threads
2779  for (int i=0; i<threadCount; i++){
2780  pthread_join(threads[i], NULL);
2781  }
2782  #endif
2783 
2784  // tell the spike buffer to advance to the next time step
2785  spikeBuf->step();
2786 }
2787 
2788 void SNN::findFiring() {
2789  #ifndef __NO_PTHREADS__ // POSIX
2790 	pthread_t threads[numCores + 1]; // +1 so the array is non-empty even if numCores == 0 (works, but bad practice)
2791  cpu_set_t cpus;
2792  ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
2793  int threadCount = 0;
2794  #endif
2795 
2796  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
2797  if (!groupPartitionLists[netId].empty()) {
2798  if (netId < CPU_RUNTIME_BASE) // GPU runtime
2799  findFiring_GPU(netId);
2800  else {// CPU runtime
2801  #ifdef __NO_PTHREADS__
2802  findFiring_CPU(netId);
2803  #else // Linux or MAC
2804  pthread_attr_t attr;
2805  pthread_attr_init(&attr);
2806  CPU_ZERO(&cpus);
2807  CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
2808  pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);
2809 
2810  argsThreadRoutine[threadCount].snn_pointer = this;
2811  argsThreadRoutine[threadCount].netId = netId;
2812  argsThreadRoutine[threadCount].lGrpId = 0;
2813  argsThreadRoutine[threadCount].startIdx = 0;
2814  argsThreadRoutine[threadCount].endIdx = 0;
2815  argsThreadRoutine[threadCount].GtoLOffset = 0;
2816 
2817  pthread_create(&threads[threadCount], &attr, &SNN::helperFindFiring_CPU, (void*)&argsThreadRoutine[threadCount]);
2818  pthread_attr_destroy(&attr);
2819  threadCount++;
2820  #endif
2821  }
2822  }
2823  }
2824 
2825  #ifndef __NO_PTHREADS__ // POSIX
2826  // join all the threads
2827  for (int i=0; i<threadCount; i++){
2828  pthread_join(threads[i], NULL);
2829  }
2830  #endif
2831 }
2832 
2833 void SNN::doCurrentUpdate() {
2834  #ifndef __NO_PTHREADS__ // POSIX
2835  pthread_t threads[numCores + 1]; // +1 so the array is never zero-sized when numCores == 0 (works, but bad practice)
2836  cpu_set_t cpus;
2837  ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
2838  int threadCount = 0;
2839  #endif
2840 
2841  // This loop updates and generates spikes on connections with a delay of >1ms
2842  // (by calling doCurrentUpdateD2_GPU() and helperDoCurrentUpdateD2_CPU())
2843  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
2844  if (!groupPartitionLists[netId].empty()) {
2845  if (netId < CPU_RUNTIME_BASE) // GPU runtime
2846  doCurrentUpdateD2_GPU(netId);
2847  else{ // CPU runtime
2848  #ifdef __NO_PTHREADS__
2849  doCurrentUpdateD2_CPU(netId);
2850  #else // Linux or MAC
2851  pthread_attr_t attr;
2852  pthread_attr_init(&attr);
2853  CPU_ZERO(&cpus);
2854  CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
2855  pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);
2856 
2857  argsThreadRoutine[threadCount].snn_pointer = this;
2858  argsThreadRoutine[threadCount].netId = netId;
2859  argsThreadRoutine[threadCount].lGrpId = 0;
2860  argsThreadRoutine[threadCount].startIdx = 0;
2861  argsThreadRoutine[threadCount].endIdx = 0;
2862  argsThreadRoutine[threadCount].GtoLOffset = 0;
2863 
2864  pthread_create(&threads[threadCount], &attr, &SNN::helperDoCurrentUpdateD2_CPU, (void*)&argsThreadRoutine[threadCount]);
2865  pthread_attr_destroy(&attr);
2866  threadCount++;
2867  #endif
2868  }
2869  }
2870  }
2871 
2872  #ifndef __NO_PTHREADS__ // POSIX
2873  // join all the threads
2874  for (int i=0; i<threadCount; i++){
2875  pthread_join(threads[i], NULL);
2876  }
2877  threadCount = 0;
2878  #endif
2879 
2880  // This loop is very similar to the previous loop above, but it
2881  // updates and generates spikes on connections with a delay of exactly 1 ms
2882  // (by calling doCurrentUpdateD1_GPU() and helperDoCurrentUpdateD1_CPU())
2883  // XXX It would be nice to reduce the code duplication across these two loops
2884  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
2885  if (!groupPartitionLists[netId].empty()) {
2886  if (netId < CPU_RUNTIME_BASE) // GPU runtime
2887  doCurrentUpdateD1_GPU(netId);
2888  else{ // CPU runtime
2889  #ifdef __NO_PTHREADS__
2890  doCurrentUpdateD1_CPU(netId);
2891  #else // Linux or MAC
2892  pthread_attr_t attr;
2893  pthread_attr_init(&attr);
2894  CPU_ZERO(&cpus);
2895  CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
2896  pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);
2897 
2898  argsThreadRoutine[threadCount].snn_pointer = this;
2899  argsThreadRoutine[threadCount].netId = netId;
2900  argsThreadRoutine[threadCount].lGrpId = 0;
2901  argsThreadRoutine[threadCount].startIdx = 0;
2902  argsThreadRoutine[threadCount].endIdx = 0;
2903  argsThreadRoutine[threadCount].GtoLOffset = 0;
2904 
2905  pthread_create(&threads[threadCount], &attr, &SNN::helperDoCurrentUpdateD1_CPU, (void*)&argsThreadRoutine[threadCount]);
2906  pthread_attr_destroy(&attr);
2907  threadCount++;
2908  #endif
2909  }
2910  }
2911  }
2912 
2913  #ifndef __NO_PTHREADS__ // POSIX
2914  // join all the threads
2915  for (int i=0; i<threadCount; i++){
2916  pthread_join(threads[i], NULL);
2917  }
2918  #endif
2919 }
2920 
2921 void SNN::updateTimingTable() {
2922  #ifndef __NO_PTHREADS__ // POSIX
2923  pthread_t threads[numCores + 1]; // +1 so the array is never zero-sized when numCores == 0 (works, but bad practice)
2924  cpu_set_t cpus;
2925  ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
2926  int threadCount = 0;
2927  #endif
2928 
2929  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
2930  if (!groupPartitionLists[netId].empty()) {
2931  if (netId < CPU_RUNTIME_BASE) // GPU runtime
2932  updateTimingTable_GPU(netId);
2933  else{ // CPU runtime
2934  #ifdef __NO_PTHREADS__
2935  updateTimingTable_CPU(netId);
2936  #else // Linux or MAC
2937  pthread_attr_t attr;
2938  pthread_attr_init(&attr);
2939  CPU_ZERO(&cpus);
2940  CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
2941  pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);
2942 
2943  argsThreadRoutine[threadCount].snn_pointer = this;
2944  argsThreadRoutine[threadCount].netId = netId;
2945  argsThreadRoutine[threadCount].lGrpId = 0;
2946  argsThreadRoutine[threadCount].startIdx = 0;
2947  argsThreadRoutine[threadCount].endIdx = 0;
2948  argsThreadRoutine[threadCount].GtoLOffset = 0;
2949 
2950  pthread_create(&threads[threadCount], &attr, &SNN::helperUpdateTimingTable_CPU, (void*)&argsThreadRoutine[threadCount]);
2951  pthread_attr_destroy(&attr);
2952  threadCount++;
2953  #endif
2954  }
2955  }
2956  }
2957  #ifndef __NO_PTHREADS__ // POSIX
2958  // join all the threads
2959  for (int i=0; i<threadCount; i++){
2960  pthread_join(threads[i], NULL);
2961  }
2962  #endif
2963 }
2964 
2965 void SNN::globalStateUpdate() {
2966  #ifndef __NO_PTHREADS__ // POSIX
2967  pthread_t threads[numCores + 1]; // +1 so the array is never zero-sized when numCores == 0 (works, but bad practice)
2968  cpu_set_t cpus;
2969  ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
2970  int threadCount = 0;
2971  #endif
2972 
2973  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
2974  if (!groupPartitionLists[netId].empty()) {
2975  if (netId < CPU_RUNTIME_BASE) // GPU runtime
2976  globalStateUpdate_C_GPU(netId);
2977  else{ // CPU runtime
2978  #ifdef __NO_PTHREADS__
2979  globalStateUpdate_CPU(netId);
2980  #else // Linux or MAC
2981  pthread_attr_t attr;
2982  pthread_attr_init(&attr);
2983  CPU_ZERO(&cpus);
2984  CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
2985  pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);
2986 
2987  argsThreadRoutine[threadCount].snn_pointer = this;
2988  argsThreadRoutine[threadCount].netId = netId;
2989  argsThreadRoutine[threadCount].lGrpId = 0;
2990  argsThreadRoutine[threadCount].startIdx = 0;
2991  argsThreadRoutine[threadCount].endIdx = 0;
2992  argsThreadRoutine[threadCount].GtoLOffset = 0;
2993 
2994  pthread_create(&threads[threadCount], &attr, &SNN::helperGlobalStateUpdate_CPU, (void*)&argsThreadRoutine[threadCount]);
2995  pthread_attr_destroy(&attr);
2996  threadCount++;
2997  #endif
2998  }
2999  }
3000  }
3001 
3002  #ifndef __NO_PTHREADS__ // POSIX
3003  // join all the threads
3004  for (int i=0; i<threadCount; i++){
3005  pthread_join(threads[i], NULL);
3006  }
3007  #endif
3008 
3009  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
3010  if (!groupPartitionLists[netId].empty()) {
3011  if (netId < CPU_RUNTIME_BASE) // GPU runtime
3012  globalStateUpdate_N_GPU(netId);
3013  }
3014  }
3015 
3016  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
3017  if (!groupPartitionLists[netId].empty()) {
3018  if (netId < CPU_RUNTIME_BASE) // GPU runtime
3019  globalStateUpdate_G_GPU(netId);
3020  }
3021  }
3022 }
3023 
3024 void SNN::clearExtFiringTable() {
3025  #ifndef __NO_PTHREADS__ // POSIX
3026  pthread_t threads[numCores + 1]; // +1 so the array is never zero-sized when numCores == 0 (works, but bad practice)
3027  cpu_set_t cpus;
3028  ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
3029  int threadCount = 0;
3030  #endif
3031 
3032  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
3033  if (!groupPartitionLists[netId].empty()) {
3034  if (netId < CPU_RUNTIME_BASE) // GPU runtime
3035  clearExtFiringTable_GPU(netId);
3036  else{ // CPU runtime
3037  #ifdef __NO_PTHREADS__
3038  clearExtFiringTable_CPU(netId);
3039  #else // Linux or MAC
3040  pthread_attr_t attr;
3041  pthread_attr_init(&attr);
3042  CPU_ZERO(&cpus);
3043  CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
3044  pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);
3045 
3046  argsThreadRoutine[threadCount].snn_pointer = this;
3047  argsThreadRoutine[threadCount].netId = netId;
3048  argsThreadRoutine[threadCount].lGrpId = 0;
3049  argsThreadRoutine[threadCount].startIdx = 0;
3050  argsThreadRoutine[threadCount].endIdx = 0;
3051  argsThreadRoutine[threadCount].GtoLOffset = 0;
3052 
3053  pthread_create(&threads[threadCount], &attr, &SNN::helperClearExtFiringTable_CPU, (void*)&argsThreadRoutine[threadCount]);
3054  pthread_attr_destroy(&attr);
3055  threadCount++;
3056  #endif
3057  }
3058  }
3059  }
3060 
3061  #ifndef __NO_PTHREADS__ // POSIX
3062  // join all the threads
3063  for (int i=0; i<threadCount; i++){
3064  pthread_join(threads[i], NULL);
3065  }
3066  #endif
3067 }
3068 
3069 void SNN::updateWeights() {
3070  #ifndef __NO_PTHREADS__ // POSIX
3071  pthread_t threads[numCores + 1]; // +1 so the array is never zero-sized when numCores == 0 (works, but bad practice)
3072  cpu_set_t cpus;
3073  ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
3074  int threadCount = 0;
3075  #endif
3076 
3077  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
3078  if (!groupPartitionLists[netId].empty()) {
3079  if (netId < CPU_RUNTIME_BASE) // GPU runtime
3080  updateWeights_GPU(netId);
3081  else{ // CPU runtime
3082  #ifdef __NO_PTHREADS__
3083  updateWeights_CPU(netId);
3084  #else // Linux or MAC
3085  pthread_attr_t attr;
3086  pthread_attr_init(&attr);
3087  CPU_ZERO(&cpus);
3088  CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
3089  pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);
3090 
3091  argsThreadRoutine[threadCount].snn_pointer = this;
3092  argsThreadRoutine[threadCount].netId = netId;
3093  argsThreadRoutine[threadCount].lGrpId = 0;
3094  argsThreadRoutine[threadCount].startIdx = 0;
3095  argsThreadRoutine[threadCount].endIdx = 0;
3096  argsThreadRoutine[threadCount].GtoLOffset = 0;
3097 
3098  pthread_create(&threads[threadCount], &attr, &SNN::helperUpdateWeights_CPU, (void*)&argsThreadRoutine[threadCount]);
3099  pthread_attr_destroy(&attr);
3100  threadCount++;
3101  #endif
3102  }
3103  }
3104  }
3105  #ifndef __NO_PTHREADS__ // POSIX
3106  // join all the threads
3107  for (int i=0; i<threadCount; i++){
3108  pthread_join(threads[i], NULL);
3109  }
3110  #endif
3111 
3112 }
3113 
3114 void SNN::updateNetworkConfig(int netId) {
3115  assert(netId < MAX_NET_PER_SNN);
3116 
3117  if (netId < CPU_RUNTIME_BASE) // GPU runtime
3118  copyNetworkConfig(netId, cudaMemcpyHostToDevice);
3119  else
3120  copyNetworkConfig(netId); // CPU runtime
3121 }
3122 
3123 void SNN::shiftSpikeTables() {
3124  #ifndef __NO_PTHREADS__ // POSIX
3125  pthread_t threads[numCores + 1]; // +1 so the array is never zero-sized when numCores == 0 (works, but bad practice)
3126  cpu_set_t cpus;
3127  ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
3128  int threadCount = 0;
3129  #endif
3130 
3131  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
3132  if (!groupPartitionLists[netId].empty()) {
3133  if (netId < CPU_RUNTIME_BASE) // GPU runtime
3134  shiftSpikeTables_F_GPU(netId);
3135  else { // CPU runtime
3136  #ifdef __NO_PTHREADS__
3137  shiftSpikeTables_CPU(netId);
3138  #else // Linux or MAC
3139  pthread_attr_t attr;
3140  pthread_attr_init(&attr);
3141  CPU_ZERO(&cpus);
3142  CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
3143  pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);
3144 
3145  argsThreadRoutine[threadCount].snn_pointer = this;
3146  argsThreadRoutine[threadCount].netId = netId;
3147  argsThreadRoutine[threadCount].lGrpId = 0;
3148  argsThreadRoutine[threadCount].startIdx = 0;
3149  argsThreadRoutine[threadCount].endIdx = 0;
3150  argsThreadRoutine[threadCount].GtoLOffset = 0;
3151 
3152  pthread_create(&threads[threadCount], &attr, &SNN::helperShiftSpikeTables_CPU, (void*)&argsThreadRoutine[threadCount]);
3153  pthread_attr_destroy(&attr);
3154  threadCount++;
3155  #endif
3156  }
3157  }
3158  }
3159 
3160  #ifndef __NO_PTHREADS__ // POSIX
3161  // join all the threads
3162  for (int i=0; i<threadCount; i++){
3163  pthread_join(threads[i], NULL);
3164  }
3165  #endif
3166 
3167  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
3168  if (!groupPartitionLists[netId].empty()) {
3169  if (netId < CPU_RUNTIME_BASE) // GPU runtime
3170  shiftSpikeTables_T_GPU(netId);
3171  }
3172  }
3173 }
3174 
3175 void SNN::allocateSNN(int netId) {
3176  assert(netId > ANY && netId < MAX_NET_PER_SNN);
3177 
3178  if (netId < CPU_RUNTIME_BASE)
3179  allocateSNN_GPU(netId);
3180  else
3181  allocateSNN_CPU(netId);
3182 }
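As in the dispatch loops above, the partition id itself encodes the backend: ids below CPU_RUNTIME_BASE select a GPU runtime, ids from CPU_RUNTIME_BASE upward select a CPU runtime. A small sketch of that convention follows; kCpuRuntimeBase and kMaxNetPerSnn are placeholder values standing in for the real CARLsim constants.

// Assumed layout (placeholder values): GPU partitions occupy [0, kCpuRuntimeBase),
// CPU partitions occupy [kCpuRuntimeBase, kMaxNetPerSnn).
static const int kCpuRuntimeBase = 8;    // stand-in for CPU_RUNTIME_BASE
static const int kMaxNetPerSnn   = 16;   // stand-in for MAX_NET_PER_SNN

inline bool isGpuPartition(int netId) { return netId >= 0 && netId < kCpuRuntimeBase; }
inline bool isCpuPartition(int netId) { return netId >= kCpuRuntimeBase && netId < kMaxNetPerSnn; }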
3183 
3184 void SNN::allocateManagerRuntimeData() {
3185  // reset variables related to spike counts
3186  managerRuntimeData.spikeCountSec = 0;
3187  managerRuntimeData.spikeCountD1Sec = 0;
3188  managerRuntimeData.spikeCountD2Sec = 0;
3189  managerRuntimeData.spikeCountLastSecLeftD2 = 0;
3190  managerRuntimeData.spikeCount = 0;
3191  managerRuntimeData.spikeCountD1 = 0;
3192  managerRuntimeData.spikeCountD2 = 0;
3193  managerRuntimeData.nPoissonSpikes = 0;
3194  managerRuntimeData.spikeCountExtRxD1 = 0;
3195  managerRuntimeData.spikeCountExtRxD2 = 0;
3196 
3197  managerRuntimeData.voltage = new float[managerRTDSize.maxNumNReg];
3198  managerRuntimeData.nextVoltage = new float[managerRTDSize.maxNumNReg];
3199  managerRuntimeData.recovery = new float[managerRTDSize.maxNumNReg];
3200  managerRuntimeData.Izh_a = new float[managerRTDSize.maxNumNReg];
3201  managerRuntimeData.Izh_b = new float[managerRTDSize.maxNumNReg];
3202  managerRuntimeData.Izh_c = new float[managerRTDSize.maxNumNReg];
3203  managerRuntimeData.Izh_d = new float[managerRTDSize.maxNumNReg];
3204  managerRuntimeData.Izh_C = new float[managerRTDSize.maxNumNReg];
3205  managerRuntimeData.Izh_k = new float[managerRTDSize.maxNumNReg];
3206  managerRuntimeData.Izh_vr = new float[managerRTDSize.maxNumNReg];
3207  managerRuntimeData.Izh_vt = new float[managerRTDSize.maxNumNReg];
3208  managerRuntimeData.Izh_vpeak = new float[managerRTDSize.maxNumNReg];
3209  managerRuntimeData.lif_tau_m = new int[managerRTDSize.maxNumNReg];
3210  managerRuntimeData.lif_tau_ref = new int[managerRTDSize.maxNumNReg];
3211  managerRuntimeData.lif_tau_ref_c = new int[managerRTDSize.maxNumNReg];
3212  managerRuntimeData.lif_vTh = new float[managerRTDSize.maxNumNReg];
3213  managerRuntimeData.lif_vReset = new float[managerRTDSize.maxNumNReg];
3214  managerRuntimeData.lif_gain = new float[managerRTDSize.maxNumNReg];
3215  managerRuntimeData.lif_bias = new float[managerRTDSize.maxNumNReg];
3216  managerRuntimeData.current = new float[managerRTDSize.maxNumNReg];
3217  managerRuntimeData.extCurrent = new float[managerRTDSize.maxNumNReg];
3218  managerRuntimeData.totalCurrent = new float[managerRTDSize.maxNumNReg];
3219  managerRuntimeData.curSpike = new bool[managerRTDSize.maxNumNReg];
3220  memset(managerRuntimeData.voltage, 0, sizeof(float) * managerRTDSize.maxNumNReg);
3221  memset(managerRuntimeData.nextVoltage, 0, sizeof(float) * managerRTDSize.maxNumNReg);
3222  memset(managerRuntimeData.recovery, 0, sizeof(float) * managerRTDSize.maxNumNReg);
3223  memset(managerRuntimeData.Izh_a, 0, sizeof(float) * managerRTDSize.maxNumNReg);
3224  memset(managerRuntimeData.Izh_b, 0, sizeof(float) * managerRTDSize.maxNumNReg);
3225  memset(managerRuntimeData.Izh_c, 0, sizeof(float) * managerRTDSize.maxNumNReg);
3226  memset(managerRuntimeData.Izh_d, 0, sizeof(float) * managerRTDSize.maxNumNReg);
3227  memset(managerRuntimeData.Izh_C, 0, sizeof(float) * managerRTDSize.maxNumNReg);
3228  memset(managerRuntimeData.Izh_k, 0, sizeof(float) * managerRTDSize.maxNumNReg);
3229  memset(managerRuntimeData.Izh_vr, 0, sizeof(float) * managerRTDSize.maxNumNReg);
3230  memset(managerRuntimeData.Izh_vt, 0, sizeof(float) * managerRTDSize.maxNumNReg);
3231  memset(managerRuntimeData.Izh_vpeak, 0, sizeof(float) * managerRTDSize.maxNumNReg);
3232  memset(managerRuntimeData.lif_tau_m, 0, sizeof(int) * managerRTDSize.maxNumNReg);
3233  memset(managerRuntimeData.lif_tau_ref, 0, sizeof(int) * managerRTDSize.maxNumNReg);
3234  memset(managerRuntimeData.lif_tau_ref_c, 0, sizeof(int) * managerRTDSize.maxNumNReg);
3235  memset(managerRuntimeData.lif_vTh, 0, sizeof(float) * managerRTDSize.maxNumNReg);
3236  memset(managerRuntimeData.lif_vReset, 0, sizeof(float) * managerRTDSize.maxNumNReg);
3237  memset(managerRuntimeData.lif_gain, 0, sizeof(float) * managerRTDSize.maxNumNReg);
3238  memset(managerRuntimeData.lif_bias, 0, sizeof(float) * managerRTDSize.maxNumNReg);
3239  memset(managerRuntimeData.current, 0, sizeof(float) * managerRTDSize.maxNumNReg);
3240  memset(managerRuntimeData.extCurrent, 0, sizeof(float) * managerRTDSize.maxNumNReg);
3241  memset(managerRuntimeData.totalCurrent, 0, sizeof(float) * managerRTDSize.maxNumNReg);
3242  memset(managerRuntimeData.curSpike, 0, sizeof(bool) * managerRTDSize.maxNumNReg);
3243 
3244  managerRuntimeData.nVBuffer = new float[MAX_NEURON_MON_GRP_SZIE * 1000 * managerRTDSize.maxNumGroups]; // 1 second v buffer
3245  managerRuntimeData.nUBuffer = new float[MAX_NEURON_MON_GRP_SZIE * 1000 * managerRTDSize.maxNumGroups];
3246  managerRuntimeData.nIBuffer = new float[MAX_NEURON_MON_GRP_SZIE * 1000 * managerRTDSize.maxNumGroups];
3247  memset(managerRuntimeData.nVBuffer, 0, sizeof(float) * MAX_NEURON_MON_GRP_SZIE * 1000 * managerRTDSize.maxNumGroups);
3248  memset(managerRuntimeData.nUBuffer, 0, sizeof(float) * MAX_NEURON_MON_GRP_SZIE * 1000 * managerRTDSize.maxNumGroups);
3249  memset(managerRuntimeData.nIBuffer, 0, sizeof(float) * MAX_NEURON_MON_GRP_SZIE * 1000 * managerRTDSize.maxNumGroups);
3250 
3251  managerRuntimeData.gAMPA = new float[managerRTDSize.glbNumNReg]; // sufficient to hold all regular neurons in the global network
3252  managerRuntimeData.gNMDA_r = new float[managerRTDSize.glbNumNReg]; // sufficient to hold all regular neurons in the global network
3253  managerRuntimeData.gNMDA_d = new float[managerRTDSize.glbNumNReg]; // sufficient to hold all regular neurons in the global network
3254  managerRuntimeData.gNMDA = new float[managerRTDSize.glbNumNReg]; // sufficient to hold all regular neurons in the global network
3255  memset(managerRuntimeData.gAMPA, 0, sizeof(float) * managerRTDSize.glbNumNReg);
3256  memset(managerRuntimeData.gNMDA_r, 0, sizeof(float) * managerRTDSize.glbNumNReg);
3257  memset(managerRuntimeData.gNMDA_d, 0, sizeof(float) * managerRTDSize.glbNumNReg);
3258  memset(managerRuntimeData.gNMDA, 0, sizeof(float) * managerRTDSize.glbNumNReg);
3259 
3260  managerRuntimeData.gGABAa = new float[managerRTDSize.glbNumNReg]; // sufficient to hold all regular neurons in the global network
3261  managerRuntimeData.gGABAb_r = new float[managerRTDSize.glbNumNReg]; // sufficient to hold all regular neurons in the global network
3262  managerRuntimeData.gGABAb_d = new float[managerRTDSize.glbNumNReg]; // sufficient to hold all regular neurons in the global network
3263  managerRuntimeData.gGABAb = new float[managerRTDSize.glbNumNReg]; // sufficient to hold all regular neurons in the global network
3264  memset(managerRuntimeData.gGABAa, 0, sizeof(float) * managerRTDSize.glbNumNReg);
3265  memset(managerRuntimeData.gGABAb_r, 0, sizeof(float) * managerRTDSize.glbNumNReg);
3266  memset(managerRuntimeData.gGABAb_d, 0, sizeof(float) * managerRTDSize.glbNumNReg);
3267  memset(managerRuntimeData.gGABAb, 0, sizeof(float) * managerRTDSize.glbNumNReg);
3268 
3269  // allocate neuromodulator levels and their auxiliary buffers
3270  managerRuntimeData.grpDA = new float[managerRTDSize.maxNumGroups];
3271  managerRuntimeData.grp5HT = new float[managerRTDSize.maxNumGroups];
3272  managerRuntimeData.grpACh = new float[managerRTDSize.maxNumGroups];
3273  managerRuntimeData.grpNE = new float[managerRTDSize.maxNumGroups];
3274  memset(managerRuntimeData.grpDA, 0, sizeof(float) * managerRTDSize.maxNumGroups);
3275  memset(managerRuntimeData.grp5HT, 0, sizeof(float) * managerRTDSize.maxNumGroups);
3276  memset(managerRuntimeData.grpACh, 0, sizeof(float) * managerRTDSize.maxNumGroups);
3277  memset(managerRuntimeData.grpNE, 0, sizeof(float) * managerRTDSize.maxNumGroups);
3278 
3279 
3280  managerRuntimeData.grpDABuffer = new float[managerRTDSize.maxNumGroups * 1000]; // 1 second DA buffer
3281  managerRuntimeData.grp5HTBuffer = new float[managerRTDSize.maxNumGroups * 1000];
3282  managerRuntimeData.grpAChBuffer = new float[managerRTDSize.maxNumGroups * 1000];
3283  managerRuntimeData.grpNEBuffer = new float[managerRTDSize.maxNumGroups * 1000];
3284  memset(managerRuntimeData.grpDABuffer, 0, managerRTDSize.maxNumGroups * sizeof(float) * 1000);
3285  memset(managerRuntimeData.grp5HTBuffer, 0, managerRTDSize.maxNumGroups * sizeof(float) * 1000);
3286  memset(managerRuntimeData.grpAChBuffer, 0, managerRTDSize.maxNumGroups * sizeof(float) * 1000);
3287  memset(managerRuntimeData.grpNEBuffer, 0, managerRTDSize.maxNumGroups * sizeof(float) * 1000);
3288 
3289  managerRuntimeData.lastSpikeTime = new int[managerRTDSize.maxNumNAssigned];
3290  memset(managerRuntimeData.lastSpikeTime, 0, sizeof(int) * managerRTDSize.maxNumNAssigned);
3291 
3292  managerRuntimeData.nSpikeCnt = new int[managerRTDSize.glbNumN];
3293  memset(managerRuntimeData.nSpikeCnt, 0, sizeof(int) * managerRTDSize.glbNumN); // sufficient to hold all neurons in the global network
3294 
3296  managerRuntimeData.avgFiring = new float[managerRTDSize.maxNumN];
3297  managerRuntimeData.baseFiring = new float[managerRTDSize.maxNumN];
3298  memset(managerRuntimeData.avgFiring, 0, sizeof(float) * managerRTDSize.maxNumN);
3299  memset(managerRuntimeData.baseFiring, 0, sizeof(float) * managerRTDSize.maxNumN);
3300 
3301  // STP can be applied to spike generators, too -> numN
3302  // \TODO: The size of these data structures could be reduced to the max synaptic delay of all
3303  // connections with STP. That number might not be the same as maxDelay_.
3304  managerRuntimeData.stpu = new float[managerRTDSize.maxNumN * (glbNetworkConfig.maxDelay + 1)];
3305  managerRuntimeData.stpx = new float[managerRTDSize.maxNumN * (glbNetworkConfig.maxDelay + 1)];
3306  memset(managerRuntimeData.stpu, 0, sizeof(float) * managerRTDSize.maxNumN * (glbNetworkConfig.maxDelay + 1));
3307  memset(managerRuntimeData.stpx, 0, sizeof(float) * managerRTDSize.maxNumN * (glbNetworkConfig.maxDelay + 1));
3308 
3309  managerRuntimeData.Npre = new unsigned short[managerRTDSize.maxNumNAssigned];
3310  managerRuntimeData.Npre_plastic = new unsigned short[managerRTDSize.maxNumNAssigned];
3311  managerRuntimeData.Npost = new unsigned short[managerRTDSize.maxNumNAssigned];
3312  managerRuntimeData.cumulativePost = new unsigned int[managerRTDSize.maxNumNAssigned];
3313  managerRuntimeData.cumulativePre = new unsigned int[managerRTDSize.maxNumNAssigned];
3314  memset(managerRuntimeData.Npre, 0, sizeof(short) * managerRTDSize.maxNumNAssigned);
3315  memset(managerRuntimeData.Npre_plastic, 0, sizeof(short) * managerRTDSize.maxNumNAssigned);
3316  memset(managerRuntimeData.Npost, 0, sizeof(short) * managerRTDSize.maxNumNAssigned);
3317  memset(managerRuntimeData.cumulativePost, 0, sizeof(int) * managerRTDSize.maxNumNAssigned);
3318  memset(managerRuntimeData.cumulativePre, 0, sizeof(int) * managerRTDSize.maxNumNAssigned);
3319 
3320  managerRuntimeData.postSynapticIds = new SynInfo[managerRTDSize.maxNumPostSynNet];
3321  managerRuntimeData.postDelayInfo = new DelayInfo[managerRTDSize.maxNumNAssigned * (glbNetworkConfig.maxDelay + 1)];
3322  memset(managerRuntimeData.postSynapticIds, 0, sizeof(SynInfo) * managerRTDSize.maxNumPostSynNet);
3323  memset(managerRuntimeData.postDelayInfo, 0, sizeof(DelayInfo) * managerRTDSize.maxNumNAssigned * (glbNetworkConfig.maxDelay + 1));
3324 
3325  managerRuntimeData.preSynapticIds = new SynInfo[managerRTDSize.maxNumPreSynNet];
3326  memset(managerRuntimeData.preSynapticIds, 0, sizeof(SynInfo) * managerRTDSize.maxNumPreSynNet);
3327 
3328  managerRuntimeData.wt = new float[managerRTDSize.maxNumPreSynNet];
3329  managerRuntimeData.wtChange = new float[managerRTDSize.maxNumPreSynNet];
3330  managerRuntimeData.maxSynWt = new float[managerRTDSize.maxNumPreSynNet];
3331  managerRuntimeData.synSpikeTime = new int[managerRTDSize.maxNumPreSynNet];
3332  memset(managerRuntimeData.wt, 0, sizeof(float) * managerRTDSize.maxNumPreSynNet);
3333  memset(managerRuntimeData.wtChange, 0, sizeof(float) * managerRTDSize.maxNumPreSynNet);
3334  memset(managerRuntimeData.maxSynWt, 0, sizeof(float) * managerRTDSize.maxNumPreSynNet);
3335  memset(managerRuntimeData.synSpikeTime, 0, sizeof(int) * managerRTDSize.maxNumPreSynNet);
3336 
3337  mulSynFast = new float[managerRTDSize.maxNumConnections];
3338  mulSynSlow = new float[managerRTDSize.maxNumConnections];
3339  memset(mulSynFast, 0, sizeof(float) * managerRTDSize.maxNumConnections);
3340  memset(mulSynSlow, 0, sizeof(float) * managerRTDSize.maxNumConnections);
3341 
3342  managerRuntimeData.connIdsPreIdx = new short int[managerRTDSize.maxNumPreSynNet];
3343  memset(managerRuntimeData.connIdsPreIdx, 0, sizeof(short int) * managerRTDSize.maxNumPreSynNet);
3344 
3345  managerRuntimeData.grpIds = new short int[managerRTDSize.maxNumNAssigned];
3346  memset(managerRuntimeData.grpIds, 0, sizeof(short int) * managerRTDSize.maxNumNAssigned);
3347 
3348  managerRuntimeData.spikeGenBits = new unsigned int[managerRTDSize.maxNumNSpikeGen / 32 + 1];
3349 
3350  managerRuntimeData.randNum = new float[managerRTDSize.maxNumNPois];
3351  managerRuntimeData.poissonFireRate = new float[managerRTDSize.maxNumNPois];
3352 
3353 
3354  // Confirm allocation of SNN runtime data in main memory
3355  managerRuntimeData.allocated = true;
3356  managerRuntimeData.memType = CPU_MEM;
3357 }
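Nearly every buffer above is allocated with new[] and then zeroed with a matching memset of the same element count. A tiny templated helper would express that pairing once; this is only a sketch of the idiom (allocZeroed is an illustrative name, not code that exists in CARLsim).

#include <cstring>
#include <cstddef>

// Allocate n elements and zero them, mirroring the new[] + memset pairs above.
template <typename T>
T* allocZeroed(std::size_t n) {
    T* buf = new T[n];
    std::memset(buf, 0, sizeof(T) * n);
    return buf;
}

// Usage sketch (field and size names as in the function above):
//   managerRuntimeData.voltage  = allocZeroed<float>(managerRTDSize.maxNumNReg);
//   managerRuntimeData.recovery = allocZeroed<float>(managerRTDSize.maxNumNReg);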
3358 
3359 int SNN::assignGroup(int gGrpId, int availableNeuronId) {
3360  int newAvailableNeuronId;
3361  assert(groupConfigMDMap[gGrpId].gStartN == -1); // The group has not yet been assigned
3362  groupConfigMDMap[gGrpId].gStartN = availableNeuronId;
3363  groupConfigMDMap[gGrpId].gEndN = availableNeuronId + groupConfigMap[gGrpId].numN - 1;
3364 
3365  KERNEL_DEBUG("Allocation for %d(%s), St=%d, End=%d",
3366  gGrpId, groupConfigMap[gGrpId].grpName.c_str(), groupConfigMDMap[gGrpId].gStartN, groupConfigMDMap[gGrpId].gEndN);
3367 
3368  newAvailableNeuronId = availableNeuronId + groupConfigMap[gGrpId].numN;
3369  //assert(newAvailableNeuronId <= numN);
3370 
3371  return newAvailableNeuronId;
3372 }
3373 
3374 int SNN::assignGroup(std::list<GroupConfigMD>::iterator grpIt, int localGroupId, int availableNeuronId) {
3375  int newAvailableNeuronId;
3376  assert(grpIt->lGrpId == -1); // The group has not yet been assigned
3377  grpIt->lGrpId = localGroupId;
3378  grpIt->lStartN = availableNeuronId;
3379  grpIt->lEndN = availableNeuronId + groupConfigMap[grpIt->gGrpId].numN - 1;
3380 
3381  grpIt->LtoGOffset = grpIt->gStartN - grpIt->lStartN;
3382  grpIt->GtoLOffset = grpIt->lStartN - grpIt->gStartN;
3383 
3384  KERNEL_DEBUG("Allocation for group (%s) [id:%d, local id:%d], St=%d, End=%d", groupConfigMap[grpIt->gGrpId].grpName.c_str(),
3385  grpIt->gGrpId, grpIt->lGrpId, grpIt->lStartN, grpIt->lEndN);
3386 
3387  newAvailableNeuronId = availableNeuronId + groupConfigMap[grpIt->gGrpId].numN;
3388 
3389  return newAvailableNeuronId;
3390 }
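The LtoGOffset and GtoLOffset computed here are exact inverses: adding GtoLOffset to a global neuron id gives the local id on the owning partition, and adding LtoGOffset to a local id recovers the global id. A tiny worked sketch with made-up numbers (offsetExample is purely illustrative):

void offsetExample() {
    // Suppose a group's global neuron range is [gStartN, gEndN] = [100, 199] and it
    // is placed locally at [lStartN, lEndN] = [40, 139] on some partition.
    int gStartN = 100, lStartN = 40;
    int LtoGOffset = gStartN - lStartN;   // +60: local id -> global id
    int GtoLOffset = lStartN - gStartN;   // -60: global id -> local id

    int gNId = 150;                       // a global neuron id inside the group
    int lNId = gNId + GtoLOffset;         // 90, the id used by the local runtime
    int gAgain = lNId + LtoGOffset;       // 150, the global id again
    (void)gAgain;
}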
3391 
3392 void SNN::generateGroupRuntime(int netId, int lGrpId) {
3393  resetNeuromodulator(netId, lGrpId);
3394 
3395  for(int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++)
3396  resetNeuron(netId, lGrpId, lNId);
3397 }
3398 
3399 void SNN::generateRuntimeGroupConfigs() {
3400  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
3401  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
3402  // publish the group configs in an array for quick access and to make them accessible on GPUs (CUDA doesn't support std::list)
3403  int gGrpId = grpIt->gGrpId;
3404  int lGrpId = grpIt->lGrpId;
3405 
3406  // Data published by groupConfigMDMap[] are generated in compileSNN() and are invariant in partitionSNN()
3407  // Data published by grpIt are generated in partitionSNN() and may have duplicate copies
3408  groupConfigs[netId][lGrpId].netId = grpIt->netId;
3409  groupConfigs[netId][lGrpId].gGrpId = grpIt->gGrpId;
3410  groupConfigs[netId][lGrpId].gStartN = grpIt->gStartN;
3411  groupConfigs[netId][lGrpId].gEndN = grpIt->gEndN;
3412  groupConfigs[netId][lGrpId].lGrpId = grpIt->lGrpId;
3413  groupConfigs[netId][lGrpId].lStartN = grpIt->lStartN;
3414  groupConfigs[netId][lGrpId].lEndN = grpIt->lEndN;
3415  groupConfigs[netId][lGrpId].LtoGOffset = grpIt->LtoGOffset;
3416  groupConfigs[netId][lGrpId].GtoLOffset = grpIt->GtoLOffset;
3417  groupConfigs[netId][lGrpId].Type = groupConfigMap[gGrpId].type;
3418  groupConfigs[netId][lGrpId].numN = groupConfigMap[gGrpId].numN;
3419  groupConfigs[netId][lGrpId].numPostSynapses = grpIt->numPostSynapses;
3420  groupConfigs[netId][lGrpId].numPreSynapses = grpIt->numPreSynapses;
3421  groupConfigs[netId][lGrpId].isSpikeGenerator = groupConfigMap[gGrpId].isSpikeGenerator;
3422  groupConfigs[netId][lGrpId].isSpikeGenFunc = (groupConfigMap[gGrpId].spikeGenFunc != NULL);
3423  groupConfigs[netId][lGrpId].WithSTP = groupConfigMap[gGrpId].stpConfig.WithSTP;
3424  groupConfigs[netId][lGrpId].WithSTDP = groupConfigMap[gGrpId].WithSTDP;
3425  groupConfigs[netId][lGrpId].WithDA_MOD = groupConfigMap[gGrpId].WithDA_MOD;
3426 
3427  // groupConfigs[netId][lGrpId].WithESTDP = groupConfigMap[gGrpId].stdpConfig.WithESTDP;
3428  // groupConfigs[netId][lGrpId].WithISTDP = groupConfigMap[gGrpId].stdpConfig.WithISTDP;
3429  // groupConfigs[netId][lGrpId].WithESTDPtype = groupConfigMap[gGrpId].stdpConfig.WithESTDPtype;
3430  // groupConfigs[netId][lGrpId].WithISTDPtype = groupConfigMap[gGrpId].stdpConfig.WithISTDPtype;
3431  // groupConfigs[netId][lGrpId].WithESTDPcurve = groupConfigMap[gGrpId].stdpConfig.WithESTDPcurve;
3432  // groupConfigs[netId][lGrpId].WithISTDPcurve = groupConfigMap[gGrpId].stdpConfig.WithISTDPcurve;
3433  groupConfigs[netId][lGrpId].WithHomeostasis = groupConfigMap[gGrpId].homeoConfig.WithHomeostasis;
3434 #ifdef LN_I_CALC_TYPES
3435  groupConfigs[netId][lGrpId].with_NMDA_rise = groupConfigMap[gGrpId].with_NMDA_rise;
3436  groupConfigs[netId][lGrpId].with_GABAb_rise = groupConfigMap[gGrpId].with_GABAb_rise;
3437 #endif
3438  groupConfigs[netId][lGrpId].FixedInputWts = grpIt->fixedInputWts;
3439  groupConfigs[netId][lGrpId].hasExternalConnect = grpIt->hasExternalConnect;
3440  groupConfigs[netId][lGrpId].Noffset = grpIt->Noffset; // Note: Noffset is not valid at this time
3441  groupConfigs[netId][lGrpId].MaxDelay = grpIt->maxOutgoingDelay;
3442  groupConfigs[netId][lGrpId].STP_A = groupConfigMap[gGrpId].stpConfig.STP_A;
3443  groupConfigs[netId][lGrpId].STP_U = groupConfigMap[gGrpId].stpConfig.STP_U;
3444  groupConfigs[netId][lGrpId].STP_tau_u_inv = groupConfigMap[gGrpId].stpConfig.STP_tau_u_inv;
3445  groupConfigs[netId][lGrpId].STP_tau_x_inv = groupConfigMap[gGrpId].stpConfig.STP_tau_x_inv;
3446  // groupConfigs[netId][lGrpId].TAU_PLUS_INV_EXC = groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_EXC;
3447  // groupConfigs[netId][lGrpId].TAU_MINUS_INV_EXC = groupConfigMap[gGrpId].stdpConfig.TAU_MINUS_INV_EXC;
3448  // groupConfigs[netId][lGrpId].ALPHA_PLUS_EXC = groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_EXC;
3449  // groupConfigs[netId][lGrpId].ALPHA_MINUS_EXC = groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_EXC;
3450  // groupConfigs[netId][lGrpId].GAMMA = groupConfigMap[gGrpId].stdpConfig.GAMMA;
3451  // groupConfigs[netId][lGrpId].KAPPA = groupConfigMap[gGrpId].stdpConfig.KAPPA;
3452  // groupConfigs[netId][lGrpId].OMEGA = groupConfigMap[gGrpId].stdpConfig.OMEGA;
3453  // groupConfigs[netId][lGrpId].TAU_PLUS_INV_INB = groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_INB;
3454  // groupConfigs[netId][lGrpId].TAU_MINUS_INV_INB = groupConfigMap[gGrpId].stdpConfig.TAU_MINUS_INV_INB;
3455  // groupConfigs[netId][lGrpId].ALPHA_PLUS_INB = groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_INB;
3456  // groupConfigs[netId][lGrpId].ALPHA_MINUS_INB = groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_INB;
3457  // groupConfigs[netId][lGrpId].BETA_LTP = groupConfigMap[gGrpId].stdpConfig.BETA_LTP;
3458  // groupConfigs[netId][lGrpId].BETA_LTD = groupConfigMap[gGrpId].stdpConfig.BETA_LTD;
3459  // groupConfigs[netId][lGrpId].LAMBDA = groupConfigMap[gGrpId].stdpConfig.LAMBDA;
3460  // groupConfigs[netId][lGrpId].DELTA = groupConfigMap[gGrpId].stdpConfig.DELTA;
3461 
3462  groupConfigs[netId][lGrpId].numCompNeighbors = 0;
3463  groupConfigs[netId][lGrpId].withCompartments = groupConfigMap[gGrpId].withCompartments;
3464  groupConfigs[netId][lGrpId].compCouplingUp = groupConfigMap[gGrpId].compCouplingUp;
3465  groupConfigs[netId][lGrpId].compCouplingDown = groupConfigMap[gGrpId].compCouplingDown;
3466  memset(&groupConfigs[netId][lGrpId].compNeighbors, 0, sizeof(groupConfigs[netId][lGrpId].compNeighbors[0]) * MAX_NUM_COMP_CONN);
3467  memset(&groupConfigs[netId][lGrpId].compCoupling, 0, sizeof(groupConfigs[netId][lGrpId].compCoupling[0]) * MAX_NUM_COMP_CONN);
3468 
3470  groupConfigs[netId][lGrpId].avgTimeScale = groupConfigMap[gGrpId].homeoConfig.avgTimeScale;
3471  groupConfigs[netId][lGrpId].avgTimeScale_decay = groupConfigMap[gGrpId].homeoConfig.avgTimeScaleDecay;
3472  groupConfigs[netId][lGrpId].avgTimeScaleInv = groupConfigMap[gGrpId].homeoConfig.avgTimeScaleInv;
3473  groupConfigs[netId][lGrpId].homeostasisScale = groupConfigMap[gGrpId].homeoConfig.homeostasisScale;
3474 
3475  // parameters of neuromodulator
3476  groupConfigs[netId][lGrpId].baseDP = groupConfigMap[gGrpId].neuromodulatorConfig.baseDP;
3477  groupConfigs[netId][lGrpId].base5HT = groupConfigMap[gGrpId].neuromodulatorConfig.base5HT;
3478  groupConfigs[netId][lGrpId].baseACh = groupConfigMap[gGrpId].neuromodulatorConfig.baseACh;
3479  groupConfigs[netId][lGrpId].baseNE = groupConfigMap[gGrpId].neuromodulatorConfig.baseNE;
3480  groupConfigs[netId][lGrpId].decayDP = groupConfigMap[gGrpId].neuromodulatorConfig.decayDP;
3481  groupConfigs[netId][lGrpId].decay5HT = groupConfigMap[gGrpId].neuromodulatorConfig.decay5HT;
3482  groupConfigs[netId][lGrpId].decayACh = groupConfigMap[gGrpId].neuromodulatorConfig.decayACh;
3483  groupConfigs[netId][lGrpId].decayNE = groupConfigMap[gGrpId].neuromodulatorConfig.decayNE;
3484  groupConfigs[netId][lGrpId].releaseDP = groupConfigMap[gGrpId].neuromodulatorConfig.releaseDP;
3485  groupConfigs[netId][lGrpId].release5HT = groupConfigMap[gGrpId].neuromodulatorConfig.release5HT;
3486  groupConfigs[netId][lGrpId].releaseACh = groupConfigMap[gGrpId].neuromodulatorConfig.releaseACh;
3487  groupConfigs[netId][lGrpId].releaseNE = groupConfigMap[gGrpId].neuromodulatorConfig.releaseNE;
3488  groupConfigs[netId][lGrpId].activeDP = groupConfigMap[gGrpId].neuromodulatorConfig.activeDP;
3489  groupConfigs[netId][lGrpId].active5HT = groupConfigMap[gGrpId].neuromodulatorConfig.active5HT;
3490  groupConfigs[netId][lGrpId].activeACh = groupConfigMap[gGrpId].neuromodulatorConfig.activeACh;
3491  groupConfigs[netId][lGrpId].activeNE = groupConfigMap[gGrpId].neuromodulatorConfig.activeNE;
3492 
3493 #ifdef LN_I_CALC_TYPES
3494  // parameters of IcalcTypes
3495  if (groupConfigMap[gGrpId].icalcType == UNKNOWN_ICALC)
3496  {
3497  KERNEL_ERROR("IcalcType is unknown in group [%d] ", gGrpId);
3499  }
3500  else {
3501  groupConfigs[netId][lGrpId].icalcType = groupConfigMap[gGrpId].icalcType;
3502  }
3503  // conductances
3504  groupConfigs[netId][lGrpId].dAMPA = groupConfigMap[gGrpId].conductanceConfig.dAMPA;
3505  groupConfigs[netId][lGrpId].rNMDA = groupConfigMap[gGrpId].conductanceConfig.rNMDA;
3506  groupConfigs[netId][lGrpId].dNMDA = groupConfigMap[gGrpId].conductanceConfig.dNMDA;
3507  groupConfigs[netId][lGrpId].sNMDA = groupConfigMap[gGrpId].conductanceConfig.sNMDA;
3508  groupConfigs[netId][lGrpId].dGABAa = groupConfigMap[gGrpId].conductanceConfig.dGABAa;
3509  groupConfigs[netId][lGrpId].rGABAb = groupConfigMap[gGrpId].conductanceConfig.rGABAb;
3510  groupConfigs[netId][lGrpId].dGABAb = groupConfigMap[gGrpId].conductanceConfig.dGABAb;
3511  groupConfigs[netId][lGrpId].sGABAb = groupConfigMap[gGrpId].conductanceConfig.sGABAb;
3512  // NM4
3513  for (int i = 0; i < NM_NE + 3; i++) // \todo LN doc intern: bound is <= NM_UNKNOWN+2; the last elements are normalization and base
3514  groupConfigs[netId][lGrpId].nm4w[i] = groupConfigMap[gGrpId].nm4wConfig.w[i];
3515  // NM4STP
3516  groupConfigs[netId][lGrpId].WithNM4STP = groupConfigMap[gGrpId].nm4StpConfig.WithNM4STP;
3517  for (int i = 0; i < NM_NE + 3; i++) {
3518  groupConfigs[netId][lGrpId].wstpu[i] = groupConfigMap[gGrpId].nm4StpConfig.w_STP_U[i];
3519  groupConfigs[netId][lGrpId].wstptauu[i] = groupConfigMap[gGrpId].nm4StpConfig.w_STP_tau_u[i];
3520  groupConfigs[netId][lGrpId].wstptaux[i] = groupConfigMap[gGrpId].nm4StpConfig.w_STP_tau_x[i];
3521  }
3522 
3523  // NM4STP 128
3524  //assert(groupConfigs[netId][lGrpId].nm4stp == nullptr); // must be called only once, memset(0) see SNN::SNNinit()
3525  //groupConfigs[netId][lGrpId].nm4stp = new NM4STPConfig(groupConfigMap[gGrpId].nm4StpConfig);
3526  //printf("DEBUG: groupConfigs[%d][%d].nm4stp = new NM4STPConfig(groupConfigMap[%d].nm4StpConfig); %s %d \n", netId, lGrpId, gGrpId, __FILE__, __LINE__);
3527  // \todo memory management, -> delete groupConfigs[netId][lGrpId].nm4stp;
3528 #endif
3529 
3530 #ifdef LN_AXON_PLAST
3531  groupConfigs[netId][lGrpId].WithAxonPlast = groupConfigMap[gGrpId].WithAxonPlast;
3532  groupConfigs[netId][lGrpId].AxonPlast_TAU = groupConfigMap[gGrpId].AxonPlast_TAU;
3533 #endif
3534 
3535  // sync groupConfigs[][] and groupConfigMDMap[]
3536  if (netId == grpIt->netId) {
3537  groupConfigMDMap[gGrpId].netId = grpIt->netId;
3538  groupConfigMDMap[gGrpId].gGrpId = grpIt->gGrpId;
3539  groupConfigMDMap[gGrpId].gStartN = grpIt->gStartN;
3540  groupConfigMDMap[gGrpId].gEndN = grpIt->gEndN;
3541  groupConfigMDMap[gGrpId].lGrpId = grpIt->lGrpId;
3542  groupConfigMDMap[gGrpId].lStartN = grpIt->lStartN;
3543  groupConfigMDMap[gGrpId].lEndN = grpIt->lEndN;
3544  groupConfigMDMap[gGrpId].numPostSynapses = grpIt->numPostSynapses;
3545  groupConfigMDMap[gGrpId].numPreSynapses = grpIt->numPreSynapses;
3546  groupConfigMDMap[gGrpId].LtoGOffset = grpIt->LtoGOffset;
3547  groupConfigMDMap[gGrpId].GtoLOffset = grpIt->GtoLOffset;
3548  groupConfigMDMap[gGrpId].fixedInputWts = grpIt->fixedInputWts;
3549  groupConfigMDMap[gGrpId].hasExternalConnect = grpIt->hasExternalConnect;
3550  groupConfigMDMap[gGrpId].Noffset = grpIt->Noffset; // Note: Noffset is not valid at this time
3551  groupConfigMDMap[gGrpId].maxOutgoingDelay = grpIt->maxOutgoingDelay;
3552  }
3553  groupConfigs[netId][lGrpId].withParamModel_9 = groupConfigMap[gGrpId].withParamModel_9;
3554  groupConfigs[netId][lGrpId].isLIF = groupConfigMap[gGrpId].isLIF;
3555 
3556  }
3557 
3558  // FIXME: How is networkConfigs[netId].numGroups available at this time?! Bug?!
3559  //int numNSpikeGen = 0;
3560  //for(int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
3561  // if (netId == groupConfigs[netId][lGrpId].netId && groupConfigs[netId][lGrpId].isSpikeGenerator && groupConfigs[netId][lGrpId].isSpikeGenFunc) {
3562  // // we only need numNSpikeGen for spike generator callbacks that need to transfer their spikes to the GPU
3563  // groupConfigs[netId][lGrpId].Noffset = numNSpikeGen; // FIXME, Noffset is updated after publish group configs
3564  // numNSpikeGen += groupConfigs[netId][lGrpId].numN;
3565  // }
3566  //}
3567  //assert(numNSpikeGen <= networkConfigs[netId].numNPois);
3568  }
3569 }
3570 
3571 void SNN::generateRuntimeConnectConfigs() {
3572  // sync localConnectLists and connectConfigMap
3573  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
3574  for (std::list<ConnectConfig>::iterator connIt = localConnectLists[netId].begin(); connIt != localConnectLists[netId].end(); connIt++) {
3575  int lConnId = connIt->connId;
3576  connectConfigMap[lConnId] = *connIt;
3577 
3578  connectConfigs[netId][lConnId].grpSrc = connIt->grpSrc;
3579  connectConfigs[netId][lConnId].grpDest = connIt->grpDest;
3580 
3581  connectConfigs[netId][lConnId].WithSTDP = connectConfigMap[lConnId].stdpConfig.WithSTDP;
3582  connectConfigs[netId][lConnId].WithESTDP = connectConfigMap[lConnId].stdpConfig.WithESTDP;
3583  connectConfigs[netId][lConnId].WithISTDP = connectConfigMap[lConnId].stdpConfig.WithISTDP;
3584  connectConfigs[netId][lConnId].WithESTDPtype = connectConfigMap[lConnId].stdpConfig.WithESTDPtype;
3585  connectConfigs[netId][lConnId].WithISTDPtype = connectConfigMap[lConnId].stdpConfig.WithISTDPtype;
3586  connectConfigs[netId][lConnId].WithESTDPcurve = connectConfigMap[lConnId].stdpConfig.WithESTDPcurve;
3587  connectConfigs[netId][lConnId].WithISTDPcurve = connectConfigMap[lConnId].stdpConfig.WithISTDPcurve;
3588 
3589  connectConfigs[netId][lConnId].TAU_PLUS_INV_EXC = connectConfigMap[lConnId].stdpConfig.TAU_PLUS_INV_EXC;
3590  connectConfigs[netId][lConnId].TAU_MINUS_INV_EXC = connectConfigMap[lConnId].stdpConfig.TAU_MINUS_INV_EXC;
3591  connectConfigs[netId][lConnId].ALPHA_PLUS_EXC = connectConfigMap[lConnId].stdpConfig.ALPHA_PLUS_EXC;
3592  connectConfigs[netId][lConnId].ALPHA_MINUS_EXC = connectConfigMap[lConnId].stdpConfig.ALPHA_MINUS_EXC;
3593  connectConfigs[netId][lConnId].GAMMA = connectConfigMap[lConnId].stdpConfig.GAMMA;
3594  connectConfigs[netId][lConnId].KAPPA = connectConfigMap[lConnId].stdpConfig.KAPPA;
3595  connectConfigs[netId][lConnId].OMEGA = connectConfigMap[lConnId].stdpConfig.OMEGA;
3596  connectConfigs[netId][lConnId].TAU_PLUS_INV_INB = connectConfigMap[lConnId].stdpConfig.TAU_PLUS_INV_INB;
3597  connectConfigs[netId][lConnId].TAU_MINUS_INV_INB = connectConfigMap[lConnId].stdpConfig.TAU_MINUS_INV_INB;
3598  connectConfigs[netId][lConnId].ALPHA_PLUS_INB = connectConfigMap[lConnId].stdpConfig.ALPHA_PLUS_INB;
3599  connectConfigs[netId][lConnId].ALPHA_MINUS_INB = connectConfigMap[lConnId].stdpConfig.ALPHA_MINUS_INB;
3600  connectConfigs[netId][lConnId].BETA_LTP = connectConfigMap[lConnId].stdpConfig.BETA_LTP;
3601  connectConfigs[netId][lConnId].BETA_LTD = connectConfigMap[lConnId].stdpConfig.BETA_LTD;
3602  connectConfigs[netId][lConnId].LAMBDA = connectConfigMap[lConnId].stdpConfig.LAMBDA;
3603  connectConfigs[netId][lConnId].DELTA = connectConfigMap[lConnId].stdpConfig.DELTA;
3604 #ifdef LN_I_CALC_TYPES
3605  connectConfigs[netId][lConnId].NM_PKA = connectConfigMap[lConnId].stdpConfig.NM_PKA;
3606  connectConfigs[netId][lConnId].NM_PLC = connectConfigMap[lConnId].stdpConfig.NM_PLC;
3607  connectConfigs[netId][lConnId].W_PKA = connectConfigMap[lConnId].stdpConfig.W_PKA;
3608  connectConfigs[netId][lConnId].W_PLC = connectConfigMap[lConnId].stdpConfig.W_PLC;
3609  connectConfigs[netId][lConnId].icalcType = connectConfigMap[lConnId].icalcType;
3610 #endif
3611  }
3612 
3613  for (std::list<ConnectConfig>::iterator connIt = externalConnectLists[netId].begin(); connIt != externalConnectLists[netId].end(); connIt++) {
3614  int lConnId = connIt->connId;
3615  connectConfigMap[lConnId] = *connIt;
3616 
3617  connectConfigs[netId][lConnId].grpSrc = connIt->grpSrc;
3618  connectConfigs[netId][lConnId].grpDest = connIt->grpDest;
3619 
3620  connectConfigs[netId][lConnId].WithSTDP = connectConfigMap[lConnId].stdpConfig.WithSTDP;
3621  connectConfigs[netId][lConnId].WithESTDP = connectConfigMap[lConnId].stdpConfig.WithESTDP;
3622  connectConfigs[netId][lConnId].WithISTDP = connectConfigMap[lConnId].stdpConfig.WithISTDP;
3623  connectConfigs[netId][lConnId].WithESTDPtype = connectConfigMap[lConnId].stdpConfig.WithESTDPtype;
3624  connectConfigs[netId][lConnId].WithISTDPtype = connectConfigMap[lConnId].stdpConfig.WithISTDPtype;
3625  connectConfigs[netId][lConnId].WithESTDPcurve = connectConfigMap[lConnId].stdpConfig.WithESTDPcurve;
3626  connectConfigs[netId][lConnId].WithISTDPcurve = connectConfigMap[lConnId].stdpConfig.WithISTDPcurve;
3627 
3628  connectConfigs[netId][lConnId].TAU_PLUS_INV_EXC = connectConfigMap[lConnId].stdpConfig.TAU_PLUS_INV_EXC;
3629  connectConfigs[netId][lConnId].TAU_MINUS_INV_EXC = connectConfigMap[lConnId].stdpConfig.TAU_MINUS_INV_EXC;
3630  connectConfigs[netId][lConnId].ALPHA_PLUS_EXC = connectConfigMap[lConnId].stdpConfig.ALPHA_PLUS_EXC;
3631  connectConfigs[netId][lConnId].ALPHA_MINUS_EXC = connectConfigMap[lConnId].stdpConfig.ALPHA_MINUS_EXC;
3632  connectConfigs[netId][lConnId].GAMMA = connectConfigMap[lConnId].stdpConfig.GAMMA;
3633  connectConfigs[netId][lConnId].KAPPA = connectConfigMap[lConnId].stdpConfig.KAPPA;
3634  connectConfigs[netId][lConnId].OMEGA = connectConfigMap[lConnId].stdpConfig.OMEGA;
3635  connectConfigs[netId][lConnId].TAU_PLUS_INV_INB = connectConfigMap[lConnId].stdpConfig.TAU_PLUS_INV_INB;
3636  connectConfigs[netId][lConnId].TAU_MINUS_INV_INB = connectConfigMap[lConnId].stdpConfig.TAU_MINUS_INV_INB;
3637  connectConfigs[netId][lConnId].ALPHA_PLUS_INB = connectConfigMap[lConnId].stdpConfig.ALPHA_PLUS_INB;
3638  connectConfigs[netId][lConnId].ALPHA_MINUS_INB = connectConfigMap[lConnId].stdpConfig.ALPHA_MINUS_INB;
3639  connectConfigs[netId][lConnId].BETA_LTP = connectConfigMap[lConnId].stdpConfig.BETA_LTP;
3640  connectConfigs[netId][lConnId].BETA_LTD = connectConfigMap[lConnId].stdpConfig.BETA_LTD;
3641  connectConfigs[netId][lConnId].LAMBDA = connectConfigMap[lConnId].stdpConfig.LAMBDA;
3642  connectConfigs[netId][lConnId].DELTA = connectConfigMap[lConnId].stdpConfig.DELTA;
3643 #ifdef LN_I_CALC_TYPES
3644  connectConfigs[netId][lConnId].NM_PKA = connectConfigMap[lConnId].stdpConfig.NM_PKA;
3645  connectConfigs[netId][lConnId].NM_PLC = connectConfigMap[lConnId].stdpConfig.NM_PLC;
3646  connectConfigs[netId][lConnId].W_PKA = connectConfigMap[lConnId].stdpConfig.W_PKA;
3647  connectConfigs[netId][lConnId].W_PLC = connectConfigMap[lConnId].stdpConfig.W_PLC;
3648  connectConfigs[netId][lConnId].icalcType = connectConfigMap[lConnId].icalcType; // Fix LN 2021
3649 #endif
3650  }
3651  }
3652 }
3653 
3654 void SNN::generateRuntimeNetworkConfigs() {
3655  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
3656  if (!groupPartitionLists[netId].empty()) {
3657  // copy the global network config to local network configs
3658  // global configuration for maximum axonal delay
3659  networkConfigs[netId].maxDelay = glbNetworkConfig.maxDelay;
3660 
3661  // configurations for execution features
3662  networkConfigs[netId].sim_with_fixedwts = sim_with_fixedwts;
3663 #define LN_I_CALC_TYPES__REQUIRED_FOR_NETWORK_LEVEL
3664  networkConfigs[netId].sim_with_conductances = sim_with_conductances;
3665  networkConfigs[netId].sim_with_homeostasis = sim_with_homeostasis;
3666  networkConfigs[netId].sim_with_stdp = sim_with_stdp;
3667  networkConfigs[netId].sim_with_stp = sim_with_stp;
3668  networkConfigs[netId].sim_in_testing = sim_in_testing;
3669 
3670  // search for active neuron monitor
3671  networkConfigs[netId].sim_with_nm = false;
3672  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
3673  if (grpIt->netId == netId && grpIt->neuronMonitorId >= 0)
3674  networkConfigs[netId].sim_with_nm = true;
3675  }
3676 
3677  // stdp, da-stdp configurations
3678  networkConfigs[netId].stdpScaleFactor = stdpScaleFactor_;
3679  networkConfigs[netId].wtChangeDecay = wtChangeDecay_;
3680 
3681  // conductance configurations
3682 #define LN_I_CALC_TYPES__REQUIRED_FOR_NETWORK_LEVEL
3683  networkConfigs[netId].sim_with_NMDA_rise = sim_with_NMDA_rise;
3684  networkConfigs[netId].sim_with_GABAb_rise = sim_with_GABAb_rise;
3685 #ifndef LN_I_CALC_TYPES
3686  networkConfigs[netId].dAMPA = dAMPA;
3687  networkConfigs[netId].rNMDA = rNMDA;
3688  networkConfigs[netId].dNMDA = dNMDA;
3689  networkConfigs[netId].sNMDA = sNMDA;
3690  networkConfigs[netId].dGABAa = dGABAa;
3691  networkConfigs[netId].rGABAb = rGABAb;
3692  networkConfigs[netId].dGABAb = dGABAb;
3693  networkConfigs[netId].sGABAb = sGABAb;
3694 #endif
3695  networkConfigs[netId].simIntegrationMethod = glbNetworkConfig.simIntegrationMethod;
3696  networkConfigs[netId].simNumStepsPerMs = glbNetworkConfig.simNumStepsPerMs;
3697  networkConfigs[netId].timeStep = glbNetworkConfig.timeStep;
3698 
3699  // configurations for boundaries of neural types
3700  findNumN(netId, networkConfigs[netId].numN, networkConfigs[netId].numNExternal, networkConfigs[netId].numNAssigned,
3701  networkConfigs[netId].numNReg, networkConfigs[netId].numNExcReg, networkConfigs[netId].numNInhReg,
3702  networkConfigs[netId].numNPois, networkConfigs[netId].numNExcPois, networkConfigs[netId].numNInhPois);
3703 
3704  // configurations for assigned groups and connections
3705  networkConfigs[netId].numGroups = 0;
3706  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
3707  if (grpIt->netId == netId)
3708  networkConfigs[netId].numGroups++;
3709  }
3710  networkConfigs[netId].numGroupsAssigned = groupPartitionLists[netId].size();
3711  //networkConfigs[netId].numConnections = localConnectLists[netId].size();
3712  //networkConfigs[netId].numAssignedConnections = localConnectLists[netId].size() + externalConnectLists[netId].size();
3713  //networkConfigs[netId].numConnections = localConnectLists[netId].size() + externalConnectLists[netId].size();
3714  networkConfigs[netId].numConnections = connectConfigMap.size(); // temporary solution: copy all connection info to each GPU
3715 
3716  // find the maximum number of pre- and post-connections among neurons
3717  // SNN::maxNumPreSynN and SNN::maxNumPostSynN are updated
3718  findMaxNumSynapsesNeurons(netId, networkConfigs[netId].maxNumPostSynN, networkConfigs[netId].maxNumPreSynN);
3719 
3720  // find the maximum number of spikes in D1 (i.e., maxDelay == 1) and D2 (i.e., maxDelay >= 2) sets
3721  findMaxSpikesD1D2(netId, networkConfigs[netId].maxSpikesD1, networkConfigs[netId].maxSpikesD2);
3722 
3723  // find the total number of synapses in the network
3724  findNumSynapsesNetwork(netId, networkConfigs[netId].numPostSynNet, networkConfigs[netId].numPreSynNet);
3725 
3726  // find the number of user-defined spike generators and update the Noffset of each group config
3727  // Note: groupConfigs[][].Noffset is valid at this time
3728  findNumNSpikeGenAndOffset(netId);
3729  }
3730  }
3731 
3732  // find the manager runtime data size, which must be sufficient to hold the data of any GPU runtime
3733  memset(&managerRTDSize, 0, sizeof(ManagerRuntimeDataSize));
3734  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
3735  if (!groupPartitionLists[netId].empty()) {
3736  // find the maximum numN, numNReg, and numNAssigned among local networks
3737  if (networkConfigs[netId].numNReg > managerRTDSize.maxNumNReg) managerRTDSize.maxNumNReg = networkConfigs[netId].numNReg;
3738  if (networkConfigs[netId].numN > managerRTDSize.maxNumN) managerRTDSize.maxNumN = networkConfigs[netId].numN;
3739  if (networkConfigs[netId].numNAssigned > managerRTDSize.maxNumNAssigned) managerRTDSize.maxNumNAssigned = networkConfigs[netId].numNAssigned;
3740 
3741  // find the maximum number of numNSpikeGen among local networks
3742  if (networkConfigs[netId].numNSpikeGen > managerRTDSize.maxNumNSpikeGen) managerRTDSize.maxNumNSpikeGen = networkConfigs[netId].numNSpikeGen;
3743 
3745  // \note maxNumNPois is used for both numRand and poissonFireRates
3746  if (networkConfigs[netId].numNPois > managerRTDSize.maxNumNPois) managerRTDSize.maxNumNPois = networkConfigs[netId].numNPois;
3747 
3748  // find the maximum number of numGroups and numConnections among local networks
3749  if (networkConfigs[netId].numGroups > managerRTDSize.maxNumGroups) managerRTDSize.maxNumGroups = networkConfigs[netId].numGroups;
3750  if (networkConfigs[netId].numConnections > managerRTDSize.maxNumConnections) managerRTDSize.maxNumConnections = networkConfigs[netId].numConnections;
3751 
3752  // find the maximum number of neurons in a group among local networks
3753  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
3754  if (groupConfigMap[grpIt->gGrpId].numN > managerRTDSize.maxNumNPerGroup) managerRTDSize.maxNumNPerGroup = groupConfigMap[grpIt->gGrpId].numN;
3755  }
3756 
3757  // find the maximum maxSpikesD1 (and maxSpikesD2) among local networks
3758  if (networkConfigs[netId].maxSpikesD1 > managerRTDSize.maxMaxSpikeD1) managerRTDSize.maxMaxSpikeD1 = networkConfigs[netId].maxSpikesD1;
3759  if (networkConfigs[netId].maxSpikesD2 > managerRTDSize.maxMaxSpikeD2) managerRTDSize.maxMaxSpikeD2 = networkConfigs[netId].maxSpikesD2;
3760 
3761  // find the maximum total number of pre- and post-synaptic connections among local networks
3762  if (networkConfigs[netId].numPreSynNet > managerRTDSize.maxNumPreSynNet) managerRTDSize.maxNumPreSynNet = networkConfigs[netId].numPreSynNet;
3763  if (networkConfigs[netId].numPostSynNet > managerRTDSize.maxNumPostSynNet) managerRTDSize.maxNumPostSynNet = networkConfigs[netId].numPostSynNet;
3764 
3765  // accumulate numN and numNReg over the global network
3766  managerRTDSize.glbNumN += networkConfigs[netId].numN;
3767  managerRTDSize.glbNumNReg += networkConfigs[netId].numNReg;
3768  }
3769  }
3770 }
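The manager buffers must be large enough for whichever partition is biggest, so each size field above is the maximum of the per-network values (while the two glbNum* fields are sums). The repeated "if (x > max) max = x;" comparisons are equivalent to a simple std::max fold, sketched below with assumed names (NetSizes and reduceManagerSizes are illustrative, not CARLsim types).

#include <algorithm>

// 'nets' stands in for the non-empty entries of networkConfigs[]; only three of
// the size fields are shown to keep the sketch short.
struct NetSizes { int numNReg, numN, numNAssigned; };

void reduceManagerSizes(const NetSizes* nets, int count,
                        int& maxNumNReg, int& maxNumN, int& maxNumNAssigned) {
    maxNumNReg = maxNumN = maxNumNAssigned = 0;
    for (int i = 0; i < count; i++) {
        maxNumNReg      = std::max(maxNumNReg,      nets[i].numNReg);
        maxNumN         = std::max(maxNumN,         nets[i].numN);
        maxNumNAssigned = std::max(maxNumNAssigned, nets[i].numNAssigned);
    }
}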
3771 
3772 bool compareSrcNeuron(const ConnectionInfo& first, const ConnectionInfo& second) {
3773  return (first.nSrc + first.srcGLoffset < second.nSrc + second.srcGLoffset);
3774 }
3775 
3776 bool compareDelay(const ConnectionInfo& first, const ConnectionInfo& second) {
3777  return (first.delay < second.delay);
3778 }
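These comparators are free functions so they can be handed to a standard-library sort, presumably to order the per-network connection list by (offset-corrected) source neuron and, within a source, by delay before the runtime arrays are filled. A usage sketch with a reduced stand-in for ConnectionInfo (sortConnections is illustrative, not a CARLsim function):

#include <list>

struct ConnectionInfoSketch {         // reduced stand-in for CARLsim's ConnectionInfo
    int nSrc, srcGLoffset, delay;
};

static bool bySrcNeuron(const ConnectionInfoSketch& a, const ConnectionInfoSketch& b) {
    return a.nSrc + a.srcGLoffset < b.nSrc + b.srcGLoffset;
}
static bool byDelay(const ConnectionInfoSketch& a, const ConnectionInfoSketch& b) {
    return a.delay < b.delay;
}

void sortConnections(std::list<ConnectionInfoSketch>& conns) {
    // std::list::sort is stable, so sorting by delay first and then by source
    // neuron leaves connections grouped by source, with ties ordered by delay.
    conns.sort(byDelay);
    conns.sort(bySrcNeuron);
}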
3779 
3780 //#define DEBUG__generateConnectionRuntime__pre_pos
3781 //#define DEBUG__generateConnectionRuntime__preSynapticIds
3782 
3783  // Note: ConnectionInfo entries stored in connectionLists use global ids
3784 void SNN::generateConnectionRuntime(int netId) {
3785  std::map<int, int> GLoffset; // global nId to local nId offset
3786  std::map<int, int> GLgrpId; // global grpId to local grpId offset
3787 
3788  // load offset between global neuron id and local neuron id
3789  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
3790  GLoffset[grpIt->gGrpId] = grpIt->GtoLOffset;
3791  GLgrpId[grpIt->gGrpId] = grpIt->lGrpId;
3792  }
3793  // FIXME: connId is the global connId; use connectConfigs[netId][local connId] instead,
3794  // FIXME: but note that connectConfigs[netId][] is NOT complete; it lacks external incoming connections
3795  // generate mulSynFast, mulSynSlow in connection-centric array
3796  for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
3797  // store scaling factors for synaptic currents in connection-centric array
3798  mulSynFast[connIt->second.connId] = connIt->second.mulSynFast;
3799  mulSynSlow[connIt->second.connId] = connIt->second.mulSynSlow;
3800  }
3801 
3802  // parse ConnectionInfo stored in connectionLists[netId]
3803  // note: ConnectionInfo entries stored in connectionLists use global ids
3804  // generate Npost, Npre, Npre_plastic
3805  int parsedConnections = 0;
3806  memset(managerRuntimeData.Npost, 0, sizeof(short) * networkConfigs[netId].numNAssigned);
3807  memset(managerRuntimeData.Npre, 0, sizeof(short) * networkConfigs[netId].numNAssigned);
3808  memset(managerRuntimeData.Npre_plastic, 0, sizeof(short) * networkConfigs[netId].numNAssigned);
3809 #ifdef LN_FIX_CONNLIST_
3810  for (std::vector<ConnectionInfo>::iterator connIt = connectionLists[netId].begin(); connIt != connectionLists[netId].end(); connIt++) {
3811 #else
3812  for (std::list<ConnectionInfo>::iterator connIt = connectionLists[netId].begin(); connIt != connectionLists[netId].end(); connIt++) {
3813 #endif
3814  connIt->srcGLoffset = GLoffset[connIt->grpSrc];
3815  if (managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]] == SYNAPSE_ID_MASK) {
3816  KERNEL_ERROR("Error: the number of synapses exceeds maximum limit (%d) for neuron %d (group %d)", SYNAPSE_ID_MASK, connIt->nSrc, connIt->grpSrc);
3818  }
3819  if (managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]] == SYNAPSE_ID_MASK) {
3820  KERNEL_ERROR("Error: the number of synapses exceeds maximum limit (%d) for neuron %d (group %d)", SYNAPSE_ID_MASK, connIt->nDest, connIt->grpDest);
3822  }
3823  managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]]++;
3824  managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]]++;
3825 
3826  if (GET_FIXED_PLASTIC(connectConfigMap[connIt->connId].connProp) == SYN_PLASTIC) {
3827  sim_with_fixedwts = false; // if the network has any plastic synapses at all, this is set to false
3828  managerRuntimeData.Npre_plastic[connIt->nDest + GLoffset[connIt->grpDest]]++;
3829 
3830  // homeostasis
3831  if (groupConfigMap[connIt->grpDest].homeoConfig.WithHomeostasis && groupConfigMDMap[connIt->grpDest].homeoId == -1)
3832  groupConfigMDMap[connIt->grpDest].homeoId = connIt->nDest + GLoffset[connIt->grpDest]; // this neuron info will be printed
3833 
3834  // old access to homeostasis
3835  //if (groupConfigs[netId][GLgrpId[it->grpDest]].WithHomeostasis && groupConfigs[netId][GLgrpId[it->grpDest]].homeoId == -1)
3836  // groupConfigs[netId][GLgrpId[it->grpDest]].homeoId = it->nDest + GLoffset[it->grpDest]; // this neuron info will be printed
3837  }
3838 
3839  // generate the delay value
3840  //it->delay = connectConfigMap[it->connId].minDelay + rand() % (connectConfigMap[it->connId].maxDelay - connectConfigMap[it->connId].minDelay + 1);
3841  //assert((it->delay >= connectConfigMap[it->connId].minDelay) && (it->delay <= connectConfigMap[it->connId].maxDelay));
3842  // generate the max weight and initial weight
3843  //float initWt = generateWeight(connectConfigMap[it->connId].connProp, connectConfigMap[it->connId].initWt, connectConfigMap[it->connId].maxWt, it->nSrc, it->grpSrc);
3844  //float initWt = connectConfigMap[it->connId].initWt;
3845  //float maxWt = connectConfigMap[it->connId].maxWt;
3846  // adjust sign of weight based on pre-group (negative if pre is inhibitory)
3847  // this access is fine; isExcitatoryGroup() uses a global grpId
3848  //it->maxWt = isExcitatoryGroup(it->grpSrc) ? fabs(maxWt) : -1.0 * fabs(maxWt);
3849  //it->initWt = isExcitatoryGroup(it->grpSrc) ? fabs(initWt) : -1.0 * fabs(initWt);
3850 
3851  parsedConnections++;
3852  }
3853  assert(parsedConnections == networkConfigs[netId].numPostSynNet && parsedConnections == networkConfigs[netId].numPreSynNet);
3854 
3855  // generate cumulativePost and cumulativePre
3856  managerRuntimeData.cumulativePost[0] = 0;
3857  managerRuntimeData.cumulativePre[0] = 0;
3858  for (int lNId = 1; lNId < networkConfigs[netId].numNAssigned; lNId++) {
3859  managerRuntimeData.cumulativePost[lNId] = managerRuntimeData.cumulativePost[lNId - 1] + managerRuntimeData.Npost[lNId - 1];
3860  managerRuntimeData.cumulativePre[lNId] = managerRuntimeData.cumulativePre[lNId - 1] + managerRuntimeData.Npre[lNId - 1];
3861  }
3862 
3863  // generate preSynapticIds, parse plastic connections first
3864  memset(managerRuntimeData.Npre, 0, sizeof(short) * networkConfigs[netId].numNAssigned); // reset managerRuntimeData.Npre to zero, so that it can be used as synId
3865  parsedConnections = 0;
3866 #ifdef LN_FIX_CONNLIST_
3867  for (std::vector<ConnectionInfo>::iterator connIt = connectionLists[netId].begin(); connIt != connectionLists[netId].end(); connIt++) {
3868 #else
3869  for (std::list<ConnectionInfo>::iterator connIt = connectionLists[netId].begin(); connIt != connectionLists[netId].end(); connIt++) {
3870 #endif
3871  if (GET_FIXED_PLASTIC(connectConfigMap[connIt->connId].connProp) == SYN_PLASTIC) {
3872  int pre_pos = managerRuntimeData.cumulativePre[connIt->nDest + GLoffset[connIt->grpDest]] + managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]];
3873  assert(pre_pos < networkConfigs[netId].numPreSynNet);
3874 
3875  managerRuntimeData.preSynapticIds[pre_pos] = SET_CONN_ID((connIt->nSrc + GLoffset[connIt->grpSrc]), 0, (GLgrpId[connIt->grpSrc])); // managerRuntimeData.Npost[it->nSrc] is not available in this pass
3876  connIt->preSynId = managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]]; // save managerRuntimeData.Npre[it->nDest] as synId
3877 
3878  managerRuntimeData.Npre[connIt->nDest+ GLoffset[connIt->grpDest]]++;
3879  parsedConnections++;
3880 
3881  // update the maximum number of pre-connections of a neuron in a group
3882  //if (managerRuntimeData.Npre[it->nDest] > groupInfo[it->grpDest].maxPreConn)
3883  // groupInfo[it->grpDest].maxPreConn = managerRuntimeData.Npre[it->nDest];
3884  }
3885  }
3886  // parse fixed connections
3887 #ifdef LN_FIX_CONNLIST_
3888  for (std::vector<ConnectionInfo>::iterator connIt = connectionLists[netId].begin(); connIt != connectionLists[netId].end(); connIt++) {
3889 #else
3890  for (std::list<ConnectionInfo>::iterator connIt = connectionLists[netId].begin(); connIt != connectionLists[netId].end(); connIt++) {
3891 #endif
3892  if (GET_FIXED_PLASTIC(connectConfigMap[connIt->connId].connProp) == SYN_FIXED) {
3893  int pre_pos = managerRuntimeData.cumulativePre[connIt->nDest + GLoffset[connIt->grpDest]] + managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]];
3894  assert(pre_pos < networkConfigs[netId].numPreSynNet);
3895 
3896  managerRuntimeData.preSynapticIds[pre_pos] = SET_CONN_ID((connIt->nSrc + GLoffset[connIt->grpSrc]), 0, (GLgrpId[connIt->grpSrc])); // managerRuntimeData.Npost[it->nSrc] is not available in this pass
3897  connIt->preSynId = managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]]; // save managerRuntimeData.Npre[it->nDest] as synId
3898 
3899  managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]]++;
3900  parsedConnections++;
3901 
3902  // update the maximum number of pre-connections of a neuron in a group
3903  //if (managerRuntimeData.Npre[it->nDest] > groupInfo[it->grpDest].maxPreConn)
3904  // groupInfo[it->grpDest].maxPreConn = managerRuntimeData.Npre[it->nDest];
3905  }
3906  }
3907  assert(parsedConnections == networkConfigs[netId].numPreSynNet);
3908  //printf("parsed pre connections %d\n", parsedConnections);
3909 
3910  // generate postSynapticIds
3911 #ifdef LN_FIX_CONNLIST_
3912  //\todo FIX_CONLIST implement splice sort(compareSrcNeuron) for vector
3913 #else
3914  connectionLists[netId].sort(compareSrcNeuron); // sort by local nSrc id
3915 #endif
3916  memset(managerRuntimeData.postDelayInfo, 0, sizeof(DelayInfo) * (networkConfigs[netId].numNAssigned * (glbNetworkConfig.maxDelay + 1)));
3917  for (int lNId = 0; lNId < networkConfigs[netId].numNAssigned; lNId++) { // pre-neuron order, local nId
3918  if (managerRuntimeData.Npost[lNId] > 0) {
3919  std::list<ConnectionInfo> postConnectionList;
3920  ConnectionInfo targetConn;
3921  targetConn.nSrc = lNId; // the other fields do not matter; use the local nId to search
3922 
3923 #ifdef LN_FIX_CONNLIST_
3924  std::list<ConnectionInfo>::iterator firstPostConn;
3925  // \todo FIX_CONLIST rewrite find()
3926 #else
3927  std::list<ConnectionInfo>::iterator firstPostConn = std::find(connectionLists[netId].begin(), connectionLists[netId].end(), targetConn);
3928 #endif
3929  std::list<ConnectionInfo>::iterator lastPostConn = firstPostConn;
3930  std::advance(lastPostConn, managerRuntimeData.Npost[lNId]);
3931  managerRuntimeData.Npost[lNId] = 0; // reset managerRuntimeData.Npost[lNId] to zero, so that it can be used as synId
3932 
3933 #ifdef LN_FIX_CONNLIST_
3934  // \todo FIX_CONLIST implement splice(...) for vector
3935  // Assertion failed : connectionLists[netId].empty(), file src\snn_manager.cpp, line 3531
3936 #else
3937  postConnectionList.splice(postConnectionList.begin(), connectionLists[netId], firstPostConn, lastPostConn);
3938  postConnectionList.sort(compareDelay);
3939 #endif
3940 
3941  // to recognize the INV, the data must be homogeneous and have a high signal/noise ratio (no cluttered formatting)
3942 #ifdef DEBUG__generateConnectionRuntime__pre_pos
3943  printf("pre_pos = cumulativePre[connIt->nDest] + connIt->preSynId\n");
3944 #endif
3945 #ifdef DEBUG__generateConnectionRuntime__preSynapticIds
3946  printf("preSynapticIds[pre_pos] = SET_CONN_ID((nSrc + GLoffset[grpSrc]), Npost[nSrc + GLoffset[grpSrc]], (GLgrpId[grpSrc]))\n");
3947 #endif
3948  //memset(&managerRuntimeData.postDelayInfo[lNId * (glbNetworkConfig.maxDelay + 1)], 0, sizeof(DelayInfo) * (glbNetworkConfig.maxDelay + 1));
3949  int post_pos, pre_pos, lastDelay = 0;
3950  parsedConnections = 0;
3951  //memset(&managerRuntimeData.postDelayInfo[lNId * (glbNetworkConfig.maxDelay + 1)], 0, sizeof(DelayInfo) * (glbNetworkConfig.maxDelay + 1));
3952  for (std::list<ConnectionInfo>::iterator connIt = postConnectionList.begin(); connIt != postConnectionList.end(); connIt++) {
3953  assert(connIt->nSrc + GLoffset[connIt->grpSrc] == lNId);
3954  post_pos = managerRuntimeData.cumulativePost[connIt->nSrc + GLoffset[connIt->grpSrc]] + managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]];
3955  pre_pos = managerRuntimeData.cumulativePre[connIt->nDest + GLoffset[connIt->grpDest]] + connIt->preSynId;
3956 #ifdef DEBUG__generateConnectionRuntime__pre_pos
3957  printf("pre_pos = %3d[ %3d] + %3d\n", managerRuntimeData.cumulativePre[connIt->nDest + GLoffset[connIt->grpDest]], connIt->nDest, connIt->preSynId);
3958 #endif
3959  assert(post_pos < networkConfigs[netId].numPostSynNet);
3960  //assert(pre_pos < numPreSynNet);
3961 
3962  // generate a post synaptic id for the current connection
3963  managerRuntimeData.postSynapticIds[post_pos] = SET_CONN_ID((connIt->nDest + GLoffset[connIt->grpDest]), connIt->preSynId, (GLgrpId[connIt->grpDest]));// uses managerRuntimeData.Npre[it->nDest] stored in it->preSynId
3964  // build the delay lookup table along the way
3965  assert(connIt->delay > 0);
3966  if (connIt->delay > lastDelay) {
3967  managerRuntimeData.postDelayInfo[lNId * (glbNetworkConfig.maxDelay + 1) + connIt->delay - 1].delay_index_start = managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]];
3968  managerRuntimeData.postDelayInfo[lNId * (glbNetworkConfig.maxDelay + 1) + connIt->delay - 1].delay_length++;
3969  } else if (connIt->delay == lastDelay) {
3970  managerRuntimeData.postDelayInfo[lNId * (glbNetworkConfig.maxDelay + 1) + connIt->delay - 1].delay_length++;
3971  } else {
3972  KERNEL_ERROR("Post-synaptic delays not sorted correctly... pre_id=%d, delay[%d]=%d, delay[%d]=%d",
3973  lNId, managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]], connIt->delay, managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]] - 1, lastDelay);
3974  }
3975  lastDelay = connIt->delay;
3976 
3977  // update the corresponding pre synaptic id
3978  SynInfo preId = managerRuntimeData.preSynapticIds[pre_pos];
3979  assert(GET_CONN_NEURON_ID(preId) == connIt->nSrc + GLoffset[connIt->grpSrc]);
3980  //assert(GET_CONN_GRP_ID(preId) == it->grpSrc);
3981  managerRuntimeData.preSynapticIds[pre_pos] = SET_CONN_ID((connIt->nSrc + GLoffset[connIt->grpSrc]), managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]], (GLgrpId[connIt->grpSrc]));
3982 #ifdef DEBUG__generateConnectionRuntime__preSynapticIds
3983  printf("preSynapticIds[ %3d] = SET_CONN_ID(( %3d + %3d[ %3d]), %3d[ %3d + %3d[ %3d]], ( %3d[ %3d]));\n",
3984  pre_pos,
3985  connIt->nSrc, GLoffset[connIt->grpSrc], connIt->grpSrc,
3986  managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]], connIt->nSrc, GLoffset[connIt->grpSrc], connIt->grpSrc,
3987  GLgrpId[connIt->grpSrc], connIt->grpSrc );
3988 #endif
3989 
3990  managerRuntimeData.wt[pre_pos] = connIt->initWt;
3991  managerRuntimeData.maxSynWt[pre_pos] = connIt->maxWt;
3992  managerRuntimeData.connIdsPreIdx[pre_pos] = connIt->connId;
3993 
3994  managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]]++;
3995  parsedConnections++;
3996 
3997  // update the maximum number of post-connections of a neuron in a group
3998  //if (managerRuntimeData.Npost[it->nSrc] > groupInfo[it->grpSrc].maxPostConn)
3999  // groupInfo[it->grpSrc].maxPostConn = managerRuntimeData.Npost[it->nSrc];
4000  }
4001  assert(parsedConnections == managerRuntimeData.Npost[lNId]);
4002  //printf("parsed post connections %d\n", parsedConnections);
4003  // note: elements in postConnectionList are deallocated automatically with postConnectionList
4004  /* for postDelayInfo debugging
4005  printf("%d ", lNId);
4006  int maxDelay_ = glbNetworkConfig.maxDelay;
4007  for (int t = 0; t < maxDelay_ + 1; t ++) {
4008  printf("[%d,%d]",
4009  managerRuntimeData.postDelayInfo[lNId * (maxDelay_ + 1) + t].delay_index_start,
4010  managerRuntimeData.postDelayInfo[lNId * (maxDelay_ + 1) + t].delay_length);
4011  }
4012  printf("\n");
4013  */
4014  }
4015  }
4016  assert(connectionLists[netId].empty());
4017 
4018  //int p = managerRuntimeData.Npost[src];
4019 
4020  //assert(managerRuntimeData.Npost[src] >= 0);
4021  //assert(managerRuntimeData.Npre[dest] >= 0);
4022  //assert((src * maxNumPostSynGrp + p) / numN < maxNumPostSynGrp); // divide by numN to prevent INT overflow
4023 
4024  //unsigned int post_pos = managerRuntimeData.cumulativePost[src] + managerRuntimeData.Npost[src];
4025  //unsigned int pre_pos = managerRuntimeData.cumulativePre[dest] + managerRuntimeData.Npre[dest];
4026 
4027  //assert(post_pos < numPostSynNet);
4028  //assert(pre_pos < numPreSynNet);
4029 
4031  //managerRuntimeData.postSynapticIds[post_pos] = SET_CONN_ID(dest, managerRuntimeData.Npre[dest], destGrp);
4032  //tmp_SynapticDelay[post_pos] = dVal;
4033 
4034  //managerRuntimeData.preSynapticIds[pre_pos] = SET_CONN_ID(src, managerRuntimeData.Npost[src], srcGrp);
4035  //managerRuntimeData.wt[pre_pos] = synWt;
4036  //managerRuntimeData.maxSynWt[pre_pos] = maxWt;
4037  //managerRuntimeData.connIdsPreIdx[pre_pos] = connId;
4038 
4039  //bool synWtType = GET_FIXED_PLASTIC(connProp);
4040 
4041  //if (synWtType == SYN_PLASTIC) {
4042  // sim_with_fixedwts = false; // if network has any plastic synapses at all, this will be set to true
4043  // managerRuntimeData.Npre_plastic[dest]++;
4044  // // homeostasis
4045  // if (groupConfigs[0][destGrp].WithHomeostasis && groupConfigs[0][destGrp].homeoId ==-1)
4046  // groupConfigs[0][destGrp].homeoId = dest; // this neuron info will be printed
4047  //}
4048 
4049  //managerRuntimeData.Npre[dest] += 1;
4050  //managerRuntimeData.Npost[src] += 1;
4051 
4052  //groupInfo[srcGrp].numPostConn++;
4053  //groupInfo[destGrp].numPreConn++;
4054 
4056  //if (managerRuntimeData.Npost[src] > groupInfo[srcGrp].maxPostConn)
4057  // groupInfo[srcGrp].maxPostConn = managerRuntimeData.Npost[src];
4058  //if (managerRuntimeData.Npre[dest] > groupInfo[destGrp].maxPreConn)
4059  // groupInfo[destGrp].maxPreConn = managerRuntimeData.Npre[src];
4060 }
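The pre- and post-synaptic bookkeeping above follows a CSR-like layout: cumulativePre[n] (and cumulativePost[n]) hold the running sum of Npre (Npost) over all lower-numbered neurons, so synapse s of neuron n lands at flat index cumulativePre[n] + s, and postDelayInfo stores, per (neuron, delay) slot, a start offset and a count into that neuron's post-synapse segment. The following stand-alone sketch illustrates only that indexing; it uses toy data and stand-in types, not CARLsim's runtime structures.

// Minimal, self-contained sketch of the CSR-style synapse layout built above.
// All names and data are illustrative; they are not CARLsim structures.
#include <cstdio>
#include <vector>

struct ToyDelayInfo { int delay_index_start; int delay_length; };

int main() {
	// Npre[n]: number of incoming synapses per neuron (toy values)
	std::vector<int> Npre = {2, 0, 3};

	// cumulativePre[n] = sum of Npre[0..n-1], exactly as built in the loop above
	std::vector<int> cumulativePre(Npre.size(), 0);
	for (size_t n = 1; n < Npre.size(); n++)
		cumulativePre[n] = cumulativePre[n - 1] + Npre[n - 1];

	// synapse s of neuron n is stored at flat position cumulativePre[n] + s
	int n = 2, s = 1;
	printf("flat index of synapse %d of neuron %d = %d\n", s, n, cumulativePre[n] + s);

	// postDelayInfo-style lookup: one {start, length} slot per delay (1-based delay,
	// stored at index delay - 1), here for a single neuron with delays 1..3
	std::vector<ToyDelayInfo> delayTable = { {0, 1}, {1, 0}, {1, 2} };
	int delay = 3;
	ToyDelayInfo d = delayTable[delay - 1];
	printf("post-synapses with delay %d: offsets [%d, %d)\n",
	       delay, d.delay_index_start, d.delay_index_start + d.delay_length);
	return 0;
}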
4061 
4062 void SNN::generateCompConnectionRuntime(int netId)
4063 {
4064  std::map<int, int> GLgrpId; // global grpId to local grpId offset
4065 
4066  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
4067  GLgrpId[grpIt->gGrpId] = grpIt->lGrpId;
4068  //printf("Global group id %i; Local group id %i\n", grpIt->gGrpId, grpIt->lGrpId);
4069  }
4070 
4071  //printf("The current netid is: %i\n", netId);
4072 
4073  for (std::list<compConnectConfig>::iterator connIt = localCompConnectLists[netId].begin(); connIt != localCompConnectLists[netId].end(); connIt++) {
4074  //printf("The size of localCompConnectLists is: %i\n", localCompConnectLists[netId].size());
4075  int grpLower = connIt->grpSrc;
4076  int grpUpper = connIt->grpDest;
4077 
4078  int i = groupConfigs[netId][GLgrpId[grpLower]].numCompNeighbors;
4079  if (i >= MAX_NUM_COMP_CONN) {
4080  KERNEL_ERROR("Group %s(%d) exceeds max number of allowed compartmental connections (%d).",
4081  groupConfigMap[grpLower].grpName.c_str(), grpLower, (int)MAX_NUM_COMP_CONN);
4083  }
4084  groupConfigs[netId][GLgrpId[grpLower]].compNeighbors[i] = grpUpper;
4085  groupConfigs[netId][GLgrpId[grpLower]].compCoupling[i] = groupConfigs[netId][GLgrpId[grpUpper]].compCouplingDown; // get down-coupling from upper neighbor
4086  groupConfigs[netId][GLgrpId[grpLower]].numCompNeighbors++;
4087 
4088  int j = groupConfigs[netId][GLgrpId[grpUpper]].numCompNeighbors;
4089  if (j >= MAX_NUM_COMP_CONN) {
4090  KERNEL_ERROR("Group %s(%d) exceeds max number of allowed compartmental connections (%d).",
4091  groupConfigMap[grpUpper].grpName.c_str(), grpUpper, (int)MAX_NUM_COMP_CONN);
4093  }
4094  groupConfigs[netId][GLgrpId[grpUpper]].compNeighbors[j] = grpLower;
4095  groupConfigs[netId][GLgrpId[grpUpper]].compCoupling[j] = groupConfigs[netId][GLgrpId[grpLower]].compCouplingUp; // get up-coupling from lower neighbor
4096  groupConfigs[netId][GLgrpId[grpUpper]].numCompNeighbors++;
4097 
4098  //printf("Group %i (local group %i) has %i compartmental neighbors!\n", grpUpper, GLgrpId[grpUpper], groupConfigs[netId][GLgrpId[grpUpper]].numCompNeighbors);
4099  }
4100 }
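generateCompConnectionRuntime() registers each compartmental connection symmetrically: the lower group records the upper group as a neighbor together with the upper group's down-coupling constant, and the upper group records the lower group with the lower group's up-coupling constant. A minimal sketch of that bookkeeping, with stand-in types rather than the real groupConfigs entries:

// Illustrative sketch of the symmetric neighbor registration performed above.
// ToyCompGroup and the constants are stand-ins, not the real GroupConfig layout.
#include <cstdio>

#define TOY_MAX_COMP_CONN 4

struct ToyCompGroup {
	int   numCompNeighbors = 0;
	int   compNeighbors[TOY_MAX_COMP_CONN];
	float compCoupling[TOY_MAX_COMP_CONN];
	float compCouplingUp   = 0.0f; // coupling toward the compartment above
	float compCouplingDown = 0.0f; // coupling toward the compartment below
};

// each side stores the other's coupling constant, as in the loop above
void linkCompartments(ToyCompGroup& lower, int lowerId, ToyCompGroup& upper, int upperId) {
	lower.compNeighbors[lower.numCompNeighbors] = upperId;
	lower.compCoupling [lower.numCompNeighbors] = upper.compCouplingDown;
	lower.numCompNeighbors++;

	upper.compNeighbors[upper.numCompNeighbors] = lowerId;
	upper.compCoupling [upper.numCompNeighbors] = lower.compCouplingUp;
	upper.numCompNeighbors++;
}

int main() {
	ToyCompGroup soma, dendrite;      // hypothetical lower/upper compartments
	soma.compCouplingUp       = 5.0f;
	dendrite.compCouplingDown = 4.0f;
	linkCompartments(soma, 0, dendrite, 1);
	printf("soma:     neighbor=%d coupling=%.1f\n", soma.compNeighbors[0], soma.compCoupling[0]);
	printf("dendrite: neighbor=%d coupling=%.1f\n", dendrite.compNeighbors[0], dendrite.compCoupling[0]);
	return 0;
}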
4101 
4102 
4103 void SNN::generatePoissonGroupRuntime(int netId, int lGrpId) {
4104  resetNeuromodulator(netId, lGrpId); // Fix LN2021
4105 
4106  for(int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++)
4107  resetPoissonNeuron(netId, lGrpId, lNId);
4108 }
4109 
4110 
4111 void SNN::collectGlobalNetworkConfigC() {
4112  // scan all connect configs to find the maximum delay in the global network, update glbNetworkConfig.maxDelay
4113  for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
4114  if (connIt->second.maxDelay > glbNetworkConfig.maxDelay)
4115  glbNetworkConfig.maxDelay = connIt->second.maxDelay;
4116  }
4117  assert(connectConfigMap.size() > 0 || glbNetworkConfig.maxDelay != -1);
4118 
4119  // scan all group configs to find the number of (reg, pois, exc, inh) neurons in the global network
4120  for(int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
4121  if (IS_EXCITATORY_TYPE(groupConfigMap[gGrpId].type) && (groupConfigMap[gGrpId].type & POISSON_NEURON)) {
4122  glbNetworkConfig.numNExcPois += groupConfigMap[gGrpId].numN;
4123  } else if (IS_INHIBITORY_TYPE(groupConfigMap[gGrpId].type) && (groupConfigMap[gGrpId].type & POISSON_NEURON)) {
4124  glbNetworkConfig.numNInhPois += groupConfigMap[gGrpId].numN;
4125  } else if (IS_EXCITATORY_TYPE(groupConfigMap[gGrpId].type) && !(groupConfigMap[gGrpId].type & POISSON_NEURON)) {
4126  glbNetworkConfig.numNExcReg += groupConfigMap[gGrpId].numN;
4127  } else if (IS_INHIBITORY_TYPE(groupConfigMap[gGrpId].type) && !(groupConfigMap[gGrpId].type & POISSON_NEURON)) {
4128  glbNetworkConfig.numNInhReg += groupConfigMap[gGrpId].numN;
4129  }
4130 
4131  if (groupConfigMDMap[gGrpId].maxOutgoingDelay == 1)
4132  glbNetworkConfig.numN1msDelay += groupConfigMap[gGrpId].numN;
4133  else if (groupConfigMDMap[gGrpId].maxOutgoingDelay >= 2)
4134  glbNetworkConfig.numN2msDelay += groupConfigMap[gGrpId].numN;
4135  }
4136 
4137  glbNetworkConfig.numNReg = glbNetworkConfig.numNExcReg + glbNetworkConfig.numNInhReg;
4138  glbNetworkConfig.numNPois = glbNetworkConfig.numNExcPois + glbNetworkConfig.numNInhPois;
4139  glbNetworkConfig.numN = glbNetworkConfig.numNReg + glbNetworkConfig.numNPois;
4140 }
4141 
4142 
4143 void SNN::collectGlobalNetworkConfigP() {
4144  // accumulate the total number of synapses (local and external connections) in the global network
4145  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
4146  if (!localConnectLists[netId].empty() || !externalConnectLists[netId].empty()) {
4147  for (std::list<ConnectConfig>::iterator connIt = localConnectLists[netId].begin(); connIt != localConnectLists[netId].end(); connIt++)
4148  glbNetworkConfig.numSynNet += connIt->numberOfConnections;
4149 
4150  for (std::list<ConnectConfig>::iterator connIt = externalConnectLists[netId].begin(); connIt != externalConnectLists[netId].end(); connIt++)
4151  glbNetworkConfig.numSynNet += connIt->numberOfConnections;
4152  }
4153  }
4154 }
4155 
4156 // After all the initialization, it is time to create the synaptic weights, weight changes, and
4157 // times of firing. These are the most costly arrays, so dense packing is essential to minimize wasted space.
4158 void SNN::compileSNN() {
4159  KERNEL_DEBUG("Beginning compilation of the network....");
4160 
4161  // compile (update) group and connection configs based on how they relate to each other
4162  // update GroupConfig::MaxDelay GroupConfig::FixedInputWts
4163  // assign GroupConfig::StartN and GroupConfig::EndN
4164  // Note: MaxDelay, FixedInputWts, StartN, and EndN are invariant across single-GPU and multi-GPU modes
4165  compileGroupConfig();
4166 
4167  compileConnectConfig(); // for future use
4168 
4169  // collect the global network config according to the compiled group and connection configs
4170  // collect SNN::maxDelay_
4171  collectGlobalNetworkConfigC();
4172 
4173  // perform various consistency checks:
4174  // - numNeurons vs. sum of all neurons
4175  // - STDP set on a post-group with incoming plastic connections
4176  // - etc.
4177  verifyNetwork();
4178 
4179  // display the global network configuration
4180  KERNEL_INFO("\n");
4181  KERNEL_INFO("************************** Global Network Configuration *******************************");
4182  KERNEL_INFO("The number of neurons in the network (numN) = %d", glbNetworkConfig.numN);
4183  KERNEL_INFO("The number of regular neurons in the network (numNReg:numNExcReg:numNInhReg) = %d:%d:%d", glbNetworkConfig.numNReg, glbNetworkConfig.numNExcReg, glbNetworkConfig.numNInhReg);
4184  KERNEL_INFO("The number of poisson neurons in the network (numNPois:numNExcPois:numNInhPois) = %d:%d:%d", glbNetworkConfig.numNPois, glbNetworkConfig.numNExcPois, glbNetworkConfig.numNInhPois);
4185  KERNEL_INFO("The maximum axonal delay in the network (maxDelay) = %d", glbNetworkConfig.maxDelay);
4186 
4187  // ensure that we don't compile the network again
4188  snnState = COMPILED_SNN;
4189 }
4190 
4191 void SNN::compileConnectConfig() {
4192 
4193  // Fix (reviewed) for failing UnitTest Interface.setSTDPDeath.
4194  bool synWtType;
4195  for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
4196  ConnectConfig &config = connIt->second;
4197  if (connIt->second.stdpConfig.WithSTDP) {
4198  synWtType = GET_FIXED_PLASTIC(connIt->second.connProp); // derived from compileGroupConfig()
4199  if (synWtType != SYN_PLASTIC) {
4200  KERNEL_ERROR("STDP requires plastic connection");
4202  }
4203  }
4204  }
4205 }
4206 
4207 void SNN::compileGroupConfig() {
4208  int grpSrc;
4209  bool synWtType;
4210 
4211  // find the maximum outgoing delay for each group according to its outgoing connections
4212  for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
4213  // check whether the current connection's maximum delay is greater than the maximum
4214  // outgoing delay recorded so far for grpSrc; this scheme yields the maximum
4215  // outgoing delay of grpSrc
4216  grpSrc = connIt->second.grpSrc;
4217  if (connIt->second.maxDelay > groupConfigMDMap[grpSrc].maxOutgoingDelay)
4218  groupConfigMDMap[grpSrc].maxOutgoingDelay = connIt->second.maxDelay;
4219 
4220  // the connection is plastic, so the destination group's input weights are not fixed
4221  synWtType = GET_FIXED_PLASTIC(connIt->second.connProp);
4222  if (synWtType == SYN_PLASTIC) {
4223  groupConfigMDMap[connIt->second.grpDest].fixedInputWts = false;
4224  }
4225  }
4226 
4227  // assign global neuron ids to each group in the following order...
4228  // !!!!!!! IMPORTANT : NEURON ORGANIZATION/ARRANGEMENT MAP !!!!!!!!!!
4229  // <--- Excitatory --> | <-------- Inhibitory REGION ----------> | <-- Excitatory -->
4230  // Excitatory-Regular | Inhibitory-Regular | Inhibitory-Poisson | Excitatory-Poisson
4231  int assignedGroup = 0;
4232  int availableNeuronId = 0;
4233  for(int order = 0; order < 4; order++) {
4234  for(int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
4235  if (IS_EXCITATORY_TYPE(groupConfigMap[gGrpId].type) && (groupConfigMap[gGrpId].type & POISSON_NEURON) && order == 3) {
4236  availableNeuronId = assignGroup(gGrpId, availableNeuronId);
4237  assignedGroup++;
4238  } else if (IS_INHIBITORY_TYPE(groupConfigMap[gGrpId].type) && (groupConfigMap[gGrpId].type & POISSON_NEURON) && order == 2) {
4239  availableNeuronId = assignGroup(gGrpId, availableNeuronId);
4240  assignedGroup++;
4241  } else if (IS_EXCITATORY_TYPE(groupConfigMap[gGrpId].type) && !(groupConfigMap[gGrpId].type & POISSON_NEURON) && order == 0) {
4242  availableNeuronId = assignGroup(gGrpId, availableNeuronId);
4243  assignedGroup++;
4244  } else if (IS_INHIBITORY_TYPE(groupConfigMap[gGrpId].type) && !(groupConfigMap[gGrpId].type & POISSON_NEURON) && order == 1) {
4245  availableNeuronId = assignGroup(gGrpId, availableNeuronId);
4246  assignedGroup++;
4247  }
4248  }
4249  }
4250  //assert(availableNeuronId == numN);
4251  assert(assignedGroup == numGroups);
4252 
4253 #ifdef LN_I_CALC_TYPES
4254  // group based IcalcType
4255  auto networkDefault = sim_with_conductances ? COBA : CUBA;
4256  for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
4257  if (groupConfigMap[gGrpId].icalcType == UNKNOWN_ICALC) {
4258  groupConfigMap[gGrpId].icalcType = networkDefault;
4259  groupConfigMap[gGrpId].with_NMDA_rise = sim_with_NMDA_rise;
4260  groupConfigMap[gGrpId].with_GABAb_rise = sim_with_GABAb_rise;
4261 #define LN_I_CALC_TYPES__REQUIRED_FOR_BACKWARD_COMP
4262  auto& config = groupConfigMap[gGrpId].conductanceConfig;
4263  config.dAMPA = dAMPA; // loss of precision is acceptable
4264  config.dGABAa = dGABAa;
4265  config.dGABAb = dGABAb;
4266  config.dNMDA = dNMDA;
4267  config.rGABAb = rGABAb;
4268  config.rNMDA = rNMDA;
4269  config.sGABAb = sGABAb;
4270  config.sNMDA = sNMDA;
4271  }
4272  }
4273 #endif
4274 
4275 }
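The four-pass loop above lays out global neuron ids contiguously per group in the fixed order excitatory-regular, inhibitory-regular, inhibitory-Poisson, excitatory-Poisson. A small sketch of that ordering rule with hypothetical group data (assignGroup()'s real bookkeeping is not reproduced here):

// Sketch of the four-pass global neuron id assignment described above.
#include <cstdio>
#include <vector>

struct ToyGroup { bool excitatory; bool poisson; int numN; int startN; int endN; };

// order 0: exc-regular, 1: inh-regular, 2: inh-Poisson, 3: exc-Poisson
static int passOf(const ToyGroup& g) {
	if ( g.excitatory && !g.poisson) return 0;
	if (!g.excitatory && !g.poisson) return 1;
	if (!g.excitatory &&  g.poisson) return 2;
	return 3;
}

int main() {
	std::vector<ToyGroup> groups = {
		{ true,  true,  10 }, // excitatory Poisson input
		{ true,  false, 80 }, // excitatory regular
		{ false, false, 20 }, // inhibitory regular
	};

	int nextId = 0; // plays the role of availableNeuronId above
	for (int order = 0; order < 4; order++)
		for (auto& g : groups)
			if (passOf(g) == order) {
				g.startN = nextId;
				g.endN   = nextId + g.numN - 1;
				nextId  += g.numN;
			}

	for (size_t i = 0; i < groups.size(); i++)
		printf("group %zu: global ids [%d, %d]\n", i, groups[i].startN, groups[i].endN);
	return 0;
}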
4276 
4277 void SNN::connectNetwork() {
4278  // this pass generates local connections
4279  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
4280  for (std::list<ConnectConfig>::iterator connIt = localConnectLists[netId].begin(); connIt != localConnectLists[netId].end(); connIt++) {
4281  switch(connIt->type) {
4282  case CONN_RANDOM:
4283  connectRandom(netId, connIt, false);
4284  break;
4285  case CONN_FULL:
4286  connectFull(netId, connIt, false);
4287  break;
4288  case CONN_FULL_NO_DIRECT:
4289  connectFull(netId, connIt, false);
4290  break;
4291  case CONN_ONE_TO_ONE:
4292  connectOneToOne(netId, connIt, false);
4293  break;
4294  case CONN_GAUSSIAN:
4295  connectGaussian(netId, connIt, false);
4296  break;
4297  case CONN_USER_DEFINED:
4298  connectUserDefined(netId, connIt, false);
4299  break;
4300  default:
4301  KERNEL_ERROR("Invalid connection type (should be 'random', 'full', 'full-no-direct', or 'one-to-one')");
4302  exitSimulation(-1);
4303  }
4304  }
4305  }
4306 
4307  // this pass generates external connections
4308  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
4309  for (std::list<ConnectConfig>::iterator connIt = externalConnectLists[netId].begin(); connIt != externalConnectLists[netId].end(); connIt++) {
4310  switch(connIt->type) {
4311  case CONN_RANDOM:
4312  connectRandom(netId, connIt, true);
4313  break;
4314  case CONN_FULL:
4315  connectFull(netId, connIt, true);
4316  break;
4317  case CONN_FULL_NO_DIRECT:
4318  connectFull(netId, connIt, true);
4319  break;
4320  case CONN_ONE_TO_ONE:
4321  connectOneToOne(netId, connIt, true);
4322  break;
4323  case CONN_GAUSSIAN:
4324  connectGaussian(netId, connIt, true);
4325  break;
4326  case CONN_USER_DEFINED:
4327  connectUserDefined(netId, connIt, true);
4328  break;
4329  default:
4330  KERNEL_ERROR("Invalid connection type (should be 'random', 'full', 'full-no-direct', or 'one-to-one')");
4331  exitSimulation(-1);
4332  }
4333  }
4334  }
4335 }
4336 
4337 
4340 //#include <thread>
4341 //void SNN::connectNetworkMT() {
4342 // // this parse generates local connections
4343 //
4344 // std::vector<std::thread> pool;
4345 //
4346 // for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
4347 //
4348 // pool.push_back(move(std::thread([&, netId]() {
4349 //
4350 // for (std::list<ConnectConfig>::iterator connIt = localConnectLists[netId].begin(); connIt != localConnectLists[netId].end(); connIt++) {
4351 // switch (connIt->type) {
4352 // case CONN_RANDOM:
4353 // //connectRandomMT(netId, connIt, false); // does not work
4354 // connectRandom(netId, connIt, false);
4355 // break;
4356 // case CONN_FULL:
4357 // connectFull(netId, connIt, false);
4358 // break;
4359 // case CONN_FULL_NO_DIRECT:
4360 // connectFull(netId, connIt, false);
4361 // break;
4362 // case CONN_ONE_TO_ONE:
4363 // connectOneToOne(netId, connIt, false);
4364 // break;
4365 // case CONN_GAUSSIAN:
4366 // connectGaussian(netId, connIt, false);
4367 // break;
4368 // case CONN_USER_DEFINED:
4369 // connectUserDefined(netId, connIt, false);
4370 // break;
4371 // default:
4372 // KERNEL_ERROR("Invalid connection type( should be 'random', 'full', 'full-no-direct', or 'one-to-one')");
4373 // exitSimulation(-1);
4374 // }
4375 // }
4376 //
4377 // })));
4378 // }
4379 //
4380 // for (int i = 0; i < pool.size(); i++)
4381 // pool[i].join();
4382 //
4383 //
4384 // pool.clear();
4385 //
4386 // // this parse generates external connections
4387 // for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
4388 //
4389 // pool.push_back(move(std::thread([&, netId]() {
4390 //
4391 // for (std::list<ConnectConfig>::iterator connIt = externalConnectLists[netId].begin(); connIt != externalConnectLists[netId].end(); connIt++) {
4392 // switch (connIt->type) {
4393 // case CONN_RANDOM:
4394 // //connectRandomMT(netId, connIt, true);
4395 // connectRandom(netId, connIt, true);
4396 // break;
4397 // case CONN_FULL:
4398 // connectFull(netId, connIt, true);
4399 // break;
4400 // case CONN_FULL_NO_DIRECT:
4401 // connectFull(netId, connIt, true);
4402 // break;
4403 // case CONN_ONE_TO_ONE:
4404 // connectOneToOne(netId, connIt, true);
4405 // break;
4406 // case CONN_GAUSSIAN:
4407 // connectGaussian(netId, connIt, true);
4408 // break;
4409 // case CONN_USER_DEFINED:
4410 // connectUserDefined(netId, connIt, true);
4411 // break;
4412 // default:
4413 // KERNEL_ERROR("Invalid connection type( should be 'random', 'full', 'full-no-direct', or 'one-to-one')");
4414 // exitSimulation(-1);
4415 // }
4416 // }
4417 //
4418 // })));
4419 // }
4420 //
4421 // for (int i = 0; i < pool.size(); i++)
4422 // pool[i].join();
4423 //}
4424 //
4425 
4426 #ifdef LN_SETUP_NETWORK_MT
4427 //featFastSetup LN20201108
4428 // 100% CPU utilization, but does not work correctly
4429 void SNN::connectNetworkMT() {
4430  // this pass generates local connections
4431 
4432  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
4433  for (std::list<ConnectConfig>::iterator connIt = localConnectLists[netId].begin(); connIt != localConnectLists[netId].end(); connIt++) {
4434  switch (connIt->type) {
4435  case CONN_RANDOM:
4436  connectRandomMT(netId, connIt, false); // does not work correctly
4437  break;
4438  case CONN_FULL:
4439  connectFull(netId, connIt, false);
4440  break;
4441  case CONN_FULL_NO_DIRECT:
4442  connectFull(netId, connIt, false);
4443  break;
4444  case CONN_ONE_TO_ONE:
4445  connectOneToOne(netId, connIt, false);
4446  break;
4447  case CONN_GAUSSIAN:
4448  connectGaussian(netId, connIt, false);
4449  break;
4450  case CONN_USER_DEFINED:
4451  connectUserDefined(netId, connIt, false);
4452  break;
4453  default:
4454  KERNEL_ERROR("Invalid connection type (should be 'random', 'full', 'full-no-direct', or 'one-to-one')");
4455  exitSimulation(-1);
4456  }
4457  }
4458  }
4459 
4460  // this pass generates external connections
4461  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
4462  for (std::list<ConnectConfig>::iterator connIt = externalConnectLists[netId].begin(); connIt != externalConnectLists[netId].end(); connIt++) {
4463  switch (connIt->type) {
4464  case CONN_RANDOM:
4465  connectRandomMT(netId, connIt, true); // does not work correctly
4466  break;
4467  case CONN_FULL:
4468  connectFull(netId, connIt, true);
4469  break;
4470  case CONN_FULL_NO_DIRECT:
4471  connectFull(netId, connIt, true);
4472  break;
4473  case CONN_ONE_TO_ONE:
4474  connectOneToOne(netId, connIt, true);
4475  break;
4476  case CONN_GAUSSIAN:
4477  connectGaussian(netId, connIt, true);
4478  break;
4479  case CONN_USER_DEFINED:
4480  connectUserDefined(netId, connIt, true);
4481  break;
4482  default:
4483  KERNEL_ERROR("Invalid connection type (should be 'random', 'full', 'full-no-direct', or 'one-to-one')");
4484  exitSimulation(-1);
4485  }
4486  }
4487  }
4488 
4489 }
4490 #endif
4491 
4493 inline void SNN::connectNeurons(int netId, int _grpSrc, int _grpDest, int _nSrc, int _nDest, short int _connId, int externalNetId) {
4494  //assert(destN <= CONN_SYN_NEURON_MASK); // total number of neurons is less than 1 million within a GPU
4495  ConnectionInfo connInfo;
4496  connInfo.grpSrc = _grpSrc;
4497  connInfo.grpDest = _grpDest;
4498  connInfo.nSrc = _nSrc;
4499  connInfo.nDest = _nDest;
4500  connInfo.srcGLoffset = 0;
4501  connInfo.connId = _connId;
4502  connInfo.preSynId = -1;
4503  connInfo.initWt = 0.0f;
4504  connInfo.maxWt = 0.0f;
4505  connInfo.delay = 0;
4506 
4507  // generate the delay value
4508  connInfo.delay = connectConfigMap[_connId].minDelay + rand() % (connectConfigMap[_connId].maxDelay - connectConfigMap[_connId].minDelay + 1);
4509  assert((connInfo.delay >= connectConfigMap[_connId].minDelay) && (connInfo.delay <= connectConfigMap[_connId].maxDelay));
4510  // generate the max weight and initial weight
4511  //float initWt = generateWeight(connectConfigMap[it->connId].connProp, connectConfigMap[it->connId].initWt, connectConfigMap[it->connId].maxWt, it->nSrc, it->grpSrc);
4512  float initWt = connectConfigMap[_connId].initWt;
4513  float maxWt = connectConfigMap[_connId].maxWt;
4514  // adjust sign of weight based on pre-group (negative if pre is inhibitory)
4515  // this access is fine; isExcitatoryGroup() uses a global grpId
4516  connInfo.maxWt = isExcitatoryGroup(_grpSrc) ? fabs(maxWt) : -1.0 * fabs(maxWt);
4517  connInfo.initWt = isExcitatoryGroup(_grpSrc) ? fabs(initWt) : -1.0 * fabs(initWt);
4518 
4519  connectionLists[netId].push_back(connInfo);
4520 
4521  // If the connection is external, copy the connection info to the external network
4522  if (externalNetId >= 0)
4523  connectionLists[externalNetId].push_back(connInfo);
4524 }
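connectNeurons() draws each synaptic delay uniformly from [minDelay, maxDelay] and flips the weight sign when the pre-synaptic group is inhibitory. The stand-alone sketch below shows only those two rules; the helper names are illustrative and not part of CARLsim:

// delay draw and weight-sign rule, mirroring the two statements above
#include <cstdio>
#include <cstdlib>
#include <cmath>

int drawDelay(int minDelay, int maxDelay) {
	// same scheme as above: uniform integer in [minDelay, maxDelay]
	return minDelay + rand() % (maxDelay - minDelay + 1);
}

float signedWeight(float w, bool preIsExcitatory) {
	// excitatory pre-group: positive weight; inhibitory pre-group: negative weight
	return preIsExcitatory ? std::fabs(w) : -std::fabs(w);
}

int main() {
	srand(42);
	printf("delay  = %d\n", drawDelay(1, 20));            // always within [1, 20]
	printf("weight = %.2f\n", signedWeight(0.5f, false)); // inhibitory pre -> -0.50
	return 0;
}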
4525 
4526 #ifdef LN_SETUP_NETWORK_MT
4527 inline void SNN::connectNeuronsMT(std::mutex &mtx, int netId, int _grpSrc, int _grpDest, int _nSrc, int _nDest, short int _connId, int externalNetId) {
4529  //assert(destN <= CONN_SYN_NEURON_MASK); // total number of neurons is less than 1 million within a GPU
4530  ConnectionInfo connInfo;
4531  connInfo.grpSrc = _grpSrc;
4532  connInfo.grpDest = _grpDest;
4533  connInfo.nSrc = _nSrc;
4534  connInfo.nDest = _nDest;
4535  connInfo.srcGLoffset = 0;
4536  connInfo.connId = _connId;
4537  connInfo.preSynId = -1;
4538  connInfo.initWt = 0.0f;
4539  connInfo.maxWt = 0.0f;
4540  connInfo.delay = 0;
4541 
4542  // generate the delay value
4543  connInfo.delay = connectConfigMap[_connId].minDelay + rand() % (connectConfigMap[_connId].maxDelay - connectConfigMap[_connId].minDelay + 1);
4544  assert((connInfo.delay >= connectConfigMap[_connId].minDelay) && (connInfo.delay <= connectConfigMap[_connId].maxDelay));
4545  // generate the max weight and initial weight
4546  //float initWt = generateWeight(connectConfigMap[it->connId].connProp, connectConfigMap[it->connId].initWt, connectConfigMap[it->connId].maxWt, it->nSrc, it->grpSrc);
4547  float initWt = connectConfigMap[_connId].initWt;
4548  float maxWt = connectConfigMap[_connId].maxWt;
4549  // adjust sign of weight based on pre-group (negative if pre is inhibitory)
4550  // this access is fine; isExcitatoryGroup() uses a global grpId
4551  connInfo.maxWt = isExcitatoryGroup(_grpSrc) ? fabs(maxWt) : -1.0 * fabs(maxWt);
4552  connInfo.initWt = isExcitatoryGroup(_grpSrc) ? fabs(initWt) : -1.0 * fabs(initWt);
4553 
4554  //
4555  /*
4556  mutex works but slows down with 100% util
4557  partitionSNNMT: 206.8s
4558  generateRuntimeSNN: 191.5s
4559 
4560  without:
4561  24s
4562  Assertion failed: parsedConnections == networkConfigs[netId].numPostSynNet && parsedConnections == networkConfigs[netId].numPreSynNet, file src\snn_manager.cpp, line 3385
4563 
4564  */
4565  //std::lock_guard<std::mutex> guard(mtx);
4566 
4567  connectionLists[netId].push_back(connInfo);
4568 
4569  // If the connection is external, copy the connection info to the external network
4570  if (externalNetId >= 0)
4571  connectionLists[externalNetId].push_back(connInfo);
4572 }
4573 #endif
4574 
4575 
4577 inline void SNN::connectNeurons(int netId, int _grpSrc, int _grpDest, int _nSrc, int _nDest, short int _connId, float initWt, float maxWt, uint8_t delay, int externalNetId) {
4578  //assert(destN <= CONN_SYN_NEURON_MASK); // total number of neurons is less than 1 million within a GPU
4579  ConnectionInfo connInfo;
4580  connInfo.grpSrc = _grpSrc;
4581  connInfo.grpDest = _grpDest;
4582  connInfo.nSrc = _nSrc;
4583  connInfo.nDest = _nDest;
4584  connInfo.srcGLoffset = 0;
4585  connInfo.connId = _connId;
4586  connInfo.preSynId = -1;
4587  // adjust the sign of the weight based on inh/exc connection
4588  connInfo.initWt = isExcitatoryGroup(_grpSrc) ? fabs(initWt) : -1.0*fabs(initWt);
4589  connInfo.maxWt = isExcitatoryGroup(_grpSrc) ? fabs(maxWt) : -1.0*fabs(maxWt);
4590  connInfo.delay = delay;
4591 
4592  connectionLists[netId].push_back(connInfo);
4593 
4594  // If the connection is external, copy the connection info to the external network
4595  if (externalNetId >= 0)
4596  connectionLists[externalNetId].push_back(connInfo);
4597 }
4598 
4599 // make full (all-to-all) connections from grpSrc to grpDest
4600 void SNN::connectFull(int netId, std::list<ConnectConfig>::iterator connIt, bool isExternal) {
4601  int grpSrc = connIt->grpSrc;
4602  int grpDest = connIt->grpDest;
4603  bool noDirect = (connIt->type == CONN_FULL_NO_DIRECT);
4604  int externalNetId = -1;
4605 
4606  if (isExternal) {
4607  externalNetId = groupConfigMDMap[grpDest].netId;
4608  assert(netId != externalNetId);
4609  }
4610 
4611  int gPreStart = groupConfigMDMap[grpSrc].gStartN;
4612  for(int gPreN = groupConfigMDMap[grpSrc].gStartN; gPreN <= groupConfigMDMap[grpSrc].gEndN; gPreN++) {
4613  Point3D locPre = getNeuronLocation3D(grpSrc, gPreN - gPreStart); // 3D coordinates of i
4614  int gPostStart = groupConfigMDMap[grpDest].gStartN;
4615  for(int gPostN = groupConfigMDMap[grpDest].gStartN; gPostN <= groupConfigMDMap[grpDest].gEndN; gPostN++) { // j: the temp neuron id
4616  // if flag is set, don't connect direct connections
4617  if(noDirect && gPreN == gPostN)
4618  continue;
4619 
4620  // check whether pre-neuron location is in RF of post-neuron
4621  Point3D locPost = getNeuronLocation3D(grpDest, gPostN - gPostStart); // 3D coordinates of j
4622  if (!isPoint3DinRF(connIt->connRadius, locPre, locPost))
4623  continue;
4624 
4625  connectNeurons(netId, grpSrc, grpDest, gPreN, gPostN, connIt->connId, externalNetId);
4626  connIt->numberOfConnections++;
4627  }
4628  }
4629 
4630  std::list<GroupConfigMD>::iterator grpIt;
4631  GroupConfigMD targetGrp;
4632 
4633  // update numPostSynapses and numPreSynapses of groups in the local network
4634  targetGrp.gGrpId = grpSrc; // the other fields do not matter
4635  grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
4636  assert(grpIt != groupPartitionLists[netId].end());
4637  grpIt->numPostSynapses += connIt->numberOfConnections;
4638 
4639  targetGrp.gGrpId = grpDest; // the other fields do not matter
4640  grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
4641  assert(grpIt != groupPartitionLists[netId].end());
4642  grpIt->numPreSynapses += connIt->numberOfConnections;
4643 
4644  // also update numPostSynapses and numPreSynapses of groups in the external network if the connection is external
4645  if (isExternal) {
4646  targetGrp.gGrpId = grpSrc; // the other fields do not matter
4647  grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
4648  assert(grpIt != groupPartitionLists[externalNetId].end());
4649  grpIt->numPostSynapses += connIt->numberOfConnections;
4650 
4651  targetGrp.gGrpId = grpDest; // the other fields do not matter
4652  grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
4653  assert(grpIt != groupPartitionLists[externalNetId].end());
4654  grpIt->numPreSynapses += connIt->numberOfConnections;
4655  }
4656 }
4657 
4658 void SNN::connectGaussian(int netId, std::list<ConnectConfig>::iterator connIt, bool isExternal) {
4659  // in case pre and post have different Grid3D sizes: scale pre to the grid size of post
4660  int grpSrc = connIt->grpSrc;
4661  int grpDest = connIt->grpDest;
4662  Grid3D grid_i = getGroupGrid3D(grpSrc);
4663  Grid3D grid_j = getGroupGrid3D(grpDest);
4664  Point3D scalePre = Point3D(grid_j.numX, grid_j.numY, grid_j.numZ) / Point3D(grid_i.numX, grid_i.numY, grid_i.numZ);
4665  int externalNetId = -1;
4666 
4667  if (isExternal) {
4668  externalNetId = groupConfigMDMap[grpDest].netId;
4669  assert(netId != externalNetId);
4670  }
4671 
4672  for(int i = groupConfigMDMap[grpSrc].gStartN; i <= groupConfigMDMap[grpSrc].gEndN; i++) {
4673  Point3D loc_i = getNeuronLocation3D(i)*scalePre; // i: adjusted 3D coordinates
4674 
4675  for(int j = groupConfigMDMap[grpDest].gStartN; j <= groupConfigMDMap[grpDest].gEndN; j++) { // j: the temp neuron id
4676  // check whether pre-neuron location is in RF of post-neuron
4677  Point3D loc_j = getNeuronLocation3D(j); // 3D coordinates of j
4678 
4679  // make sure point is in RF
4680  double rfDist = getRFDist3D(connIt->connRadius,loc_i,loc_j);
4681  if (rfDist < 0.0 || rfDist > 1.0)
4682  continue;
4683 
4684  // if rfDist is valid, it returns a number between 0 and 1
4685  // we want these numbers to map to Gaussian weights, so that rfDist=0 corresponds to the max Gaussian weight
4686  // and rfDist=1 corresponds to 0.1 times the max Gaussian weight
4687  // so we're looking at gauss = exp(-a*rfDist), where a is such that exp(-a)=0.1
4688  // solving for a, we find a = ln(10) = 2.3026
4689  double gauss = exp(-2.3026*rfDist);
4690  if (gauss < 0.1)
4691  continue;
4692 
4693  if (drand48() < connIt->connProbability) {
4694  float initWt = gauss * connIt->initWt; // scale weight according to gauss distance
4695  float maxWt = connIt->maxWt;
4696  uint8_t delay = connIt->minDelay + rand() % (connIt->maxDelay - connIt->minDelay + 1);
4697  assert((delay >= connIt->minDelay) && (delay <= connIt->maxDelay));
4698 
4699  connectNeurons(netId, grpSrc, grpDest, i, j, connIt->connId, initWt, maxWt, delay, externalNetId);
4700  connIt->numberOfConnections++;
4701  }
4702  }
4703  }
4704 
4705  std::list<GroupConfigMD>::iterator grpIt;
4706  GroupConfigMD targetGrp;
4707 
4708  // update numPostSynapses and numPreSynapses of groups in the local network
4709  targetGrp.gGrpId = grpSrc; // the other fields do not matter
4710  grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
4711  assert(grpIt != groupPartitionLists[netId].end());
4712  grpIt->numPostSynapses += connIt->numberOfConnections;
4713 
4714  targetGrp.gGrpId = grpDest; // the other fields do not matter
4715  grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
4716  assert(grpIt != groupPartitionLists[netId].end());
4717  grpIt->numPreSynapses += connIt->numberOfConnections;
4718 
4719  // also update numPostSynapses and numPreSynapses of groups in the external network if the connection is external
4720  if (isExternal) {
4721  targetGrp.gGrpId = grpSrc; // the other fields do not matter
4722  grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
4723  assert(grpIt != groupPartitionLists[externalNetId].end());
4724  grpIt->numPostSynapses += connIt->numberOfConnections;
4725 
4726  targetGrp.gGrpId = grpDest; // the other fields do not matter
4727  grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
4728  assert(grpIt != groupPartitionLists[externalNetId].end());
4729  grpIt->numPreSynapses += connIt->numberOfConnections;
4730  }
4731 }
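The scaling factor 2.3026 used above is ln(10), so the Gaussian factor falls from the full weight at rfDist = 0 to one tenth of it at rfDist = 1, and anything below 0.1 is rejected by the check above. A quick numerical check, independent of the simulator:

// verify gauss = exp(-ln(10) * rfDist) spans [1.0, 0.1] for rfDist in [0, 1]
#include <cstdio>
#include <cmath>

int main() {
	const double a = std::log(10.0); // = 2.302585..., the 2.3026 used above
	for (int i = 0; i <= 4; i++) {
		double rfDist = 0.25 * i;
		printf("rfDist = %.2f -> gauss = %.3f\n", rfDist, std::exp(-a * rfDist));
	}
	return 0;
}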
4732 
4733 void SNN::connectOneToOne(int netId, std::list<ConnectConfig>::iterator connIt, bool isExternal) {
4734  int grpSrc = connIt->grpSrc;
4735  int grpDest = connIt->grpDest;
4736  int externalNetId = -1;
4737 
4738  if (isExternal) {
4739  externalNetId = groupConfigMDMap[grpDest].netId;
4740  assert(netId != externalNetId);
4741  }
4742 
4743  assert( groupConfigMap[grpDest].numN == groupConfigMap[grpSrc].numN);
4744 
4745  // NOTE: RadiusRF does not make a difference here: ignore
4746  for(int gPreN = groupConfigMDMap[grpSrc].gStartN, gPostN = groupConfigMDMap[grpDest].gStartN; gPreN <= groupConfigMDMap[grpSrc].gEndN; gPreN++, gPostN++) {
4747  connectNeurons(netId, grpSrc, grpDest, gPreN, gPostN, connIt->connId, externalNetId);
4748  connIt->numberOfConnections++;
4749  }
4750 
4751  std::list<GroupConfigMD>::iterator grpIt;
4752  GroupConfigMD targetGrp;
4753 
4754  // update numPostSynapses and numPreSynapses of groups in the local network
4755  targetGrp.gGrpId = grpSrc; // the other fields do not matter
4756  grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
4757  assert(grpIt != groupPartitionLists[netId].end());
4758  grpIt->numPostSynapses += connIt->numberOfConnections;
4759 
4760  targetGrp.gGrpId = grpDest; // the other fields do not matter
4761  grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
4762  assert(grpIt != groupPartitionLists[netId].end());
4763  grpIt->numPreSynapses += connIt->numberOfConnections;
4764 
4765  // also update numPostSynapses and numPreSynapses of groups in the external network if the connection is external
4766  if (isExternal) {
4767  targetGrp.gGrpId = grpSrc; // the other fields do not matter
4768  grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
4769  assert(grpIt != groupPartitionLists[externalNetId].end());
4770  grpIt->numPostSynapses += connIt->numberOfConnections;
4771 
4772  targetGrp.gGrpId = grpDest; // the other fields do not matter
4773  grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
4774  assert(grpIt != groupPartitionLists[externalNetId].end());
4775  grpIt->numPreSynapses += connIt->numberOfConnections;
4776  }
4777 }
4778 
4779 // make random connections from grpSrc to grpDest with probability connProbability
4780 void SNN::connectRandom(int netId, std::list<ConnectConfig>::iterator connIt, bool isExternal) {
4781  int grpSrc = connIt->grpSrc;
4782  int grpDest = connIt->grpDest;
4783  int externalNetId = -1;
4784 
4785  if (isExternal) {
4786  externalNetId = groupConfigMDMap[grpDest].netId;
4787  assert(netId != externalNetId);
4788  }
4789 
4790  int gPreStart = groupConfigMDMap[grpSrc].gStartN;
4791  for(int gPreN = groupConfigMDMap[grpSrc].gStartN; gPreN <= groupConfigMDMap[grpSrc].gEndN; gPreN++) {
4792  Point3D locPre = getNeuronLocation3D(grpSrc, gPreN - gPreStart); // 3D coordinates of i
4793  int gPostStart = groupConfigMDMap[grpDest].gStartN;
4794  for(int gPostN = groupConfigMDMap[grpDest].gStartN; gPostN <= groupConfigMDMap[grpDest].gEndN; gPostN++) {
4795  // check whether pre-neuron location is in RF of post-neuron
4796  Point3D locPost = getNeuronLocation3D(grpDest, gPostN - gPostStart); // 3D coordinates of j
4797  if (!isPoint3DinRF(connIt->connRadius, locPre, locPost))
4798  continue;
4799 
4800  if (drand48() < connIt->connProbability) {
4801  connectNeurons(netId, grpSrc, grpDest, gPreN, gPostN, connIt->connId, externalNetId);
4802  connIt->numberOfConnections++;
4803  }
4804  }
4805  }
4806 
4807  std::list<GroupConfigMD>::iterator grpIt;
4808  GroupConfigMD targetGrp;
4809 
4810  // update numPostSynapses and numPreSynapses of groups in the local network
4811  targetGrp.gGrpId = grpSrc; // the other fields do not matter
4812  grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
4813  assert(grpIt != groupPartitionLists[netId].end());
4814  grpIt->numPostSynapses += connIt->numberOfConnections;
4815 
4816  targetGrp.gGrpId = grpDest; // the other fields do not matter
4817  grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
4818  assert(grpIt != groupPartitionLists[netId].end());
4819  grpIt->numPreSynapses += connIt->numberOfConnections;
4820 
4821  // also update numPostSynapses and numPreSynapses of groups in the external network if the connection is external
4822  if (isExternal) {
4823  targetGrp.gGrpId = grpSrc; // the other fields do not matter
4824  grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
4825  assert(grpIt != groupPartitionLists[externalNetId].end());
4826  grpIt->numPostSynapses += connIt->numberOfConnections;
4827 
4828  targetGrp.gGrpId = grpDest; // the other fields do not matter
4829  grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
4830  assert(grpIt != groupPartitionLists[externalNetId].end());
4831  grpIt->numPreSynapses += connIt->numberOfConnections;
4832  }
4833 }
4834 
4835 #ifdef LN_SETUP_NETWORK_MT
4836 #include <thread>
4837 #include <atomic>
4838 #include <mutex>
4839 //#define DEBUG_connectRandomMT_SEQ
4840 // make 'C' random connections from grpSrc to grpDest
4841 /* fast but not correct:
4842 partitionSNNMT: 14.7s
4843 Assertion failed: parsedConnections == networkConfigs[netId].numPostSynNet && parsedConnections == networkConfigs[netId].numPreSynNet, file src\snn_manager.cpp, line 3385
4844 */
4845 void SNN::connectRandomMT(int netId, std::list<ConnectConfig>::iterator connIt, bool isExternal) {
4846  int grpSrc = connIt->grpSrc;
4847  int grpDest = connIt->grpDest;
4848  int externalNetId = -1;
4849 
4850  if (isExternal) {
4851  externalNetId = groupConfigMDMap[grpDest].netId;
4852  assert(netId != externalNetId);
4853  }
4854 
4855  int gPreStart = groupConfigMDMap[grpSrc].gStartN;
4856 
4857 #ifdef DEBUG_connectRandomMT_SEQ
4858 
4859 
4860  for (int gPreN = groupConfigMDMap[grpSrc].gStartN; gPreN <= groupConfigMDMap[grpSrc].gEndN; gPreN++) {
4861  Point3D locPre = getNeuronLocation3D(grpSrc, gPreN - gPreStart); // 3D coordinates of i
4862  int gPostStart = groupConfigMDMap[grpDest].gStartN;
4863 // 7.5s
4864  for (int gPostN = groupConfigMDMap[grpDest].gStartN; gPostN <= groupConfigMDMap[grpDest].gEndN; gPostN++) {
4865  // check whether pre-neuron location is in RF of post-neuron
4866  Point3D locPost = getNeuronLocation3D(grpDest, gPostN - gPostStart); // 3D coordinates of j
4867  if (!isPoint3DinRF(connIt->connRadius, locPre, locPost))
4868  continue;
4869 // 32.5s
4870  if (drand48() < connIt->connProbability) {
4871  connectNeurons(netId, grpSrc, grpDest, gPreN, gPostN, connIt->connId, externalNetId);
4872  connIt->numberOfConnections++;
4873  }
4874 // 75.7
4875  }
4876 
4877  }
4878 
4879 #else
4880 
4881  std::atomic_int numberOfConnections = connIt->numberOfConnections;
4882  std::mutex mtx; // mutex for critical section
4883 
4884  auto worker = [&, netId, externalNetId, grpSrc, grpDest, gPreStart]
4885  (int gPreN, RadiusRF &connRadius, float connProbability, int connId) {
4886 
4887  Point3D locPre = getNeuronLocation3D(grpSrc, gPreN - gPreStart); // 3D coordinates of i
4888  int gPostStart = groupConfigMDMap[grpDest].gStartN;
4889  for (int gPostN = groupConfigMDMap[grpDest].gStartN; gPostN <= groupConfigMDMap[grpDest].gEndN; gPostN++) {
4890  // check whether pre-neuron location is in RF of post-neuron
4891  Point3D locPost = getNeuronLocation3D(grpDest, gPostN - gPostStart); // 3D coordinates of j
4892  if (!isPoint3DinRF(connRadius, locPre, locPost))
4893  continue;
4894 
4895  //double r = drand48(); // bingo drand48 -> rand is not thread safe!
4896  //srand::
4897  //double r = (double)(srand() / RAND_MAX);
4898  // https://stackoverflow.com/questions/6161322/using-stdlibs-rand-from-multiple-threads
4899  //double r = drand48_r();
4900  double r = gPostN % 10; // placeholder only (deterministic, not random); part of why this MT path is incorrect
4901  if (r < connProbability) {
4902  //std::lock_guard<std::mutex> guard(mtx);
4903  connectNeuronsMT(mtx, netId, grpSrc, grpDest, gPreN, gPostN, connId, externalNetId); // ISSUE order dependent?
4904  numberOfConnections++;
4905  }
4906  }
4907 
4908  };
4909 
4910  //std::vector<std::thread> pool(groupConfigMDMap[grpSrc].gEndN - groupConfigMDMap[grpSrc].gStartN + 1);
4911  std::vector<std::thread> pool;
4912 
4913  for (int gPreN = groupConfigMDMap[grpSrc].gStartN; gPreN <= groupConfigMDMap[grpSrc].gEndN; gPreN++)
4914  pool.push_back(move(std::thread(worker, gPreN,
4915  connIt->connRadius,
4916  connIt->connProbability,
4917  connIt->connId)));
4918 
4919  for (int i = 0; i < pool.size(); i++)
4920  pool[i].join();
4921 
4922  connIt->numberOfConnections = numberOfConnections;
4923 
4924 #endif
4925 
4926  printf("connectRandomMT connIt->connId: %d, ->numberOfConnections: %-10d\n", connIt->connId, connIt->numberOfConnections);
4927 
4928  std::list<GroupConfigMD>::iterator grpIt;
4929  GroupConfigMD targetGrp;
4930 
4931  // update numPostSynapses and numPreSynapses of groups in the local network
4932  targetGrp.gGrpId = grpSrc; // the other fields do not matter
4933  grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
4934  assert(grpIt != groupPartitionLists[netId].end());
4935  grpIt->numPostSynapses += connIt->numberOfConnections;
4936 
4937  targetGrp.gGrpId = grpDest; // the other fields do not matter
4938  grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
4939  assert(grpIt != groupPartitionLists[netId].end());
4940  grpIt->numPreSynapses += connIt->numberOfConnections;
4941 
4942 
4943  // also update numPostSynapses and numPreSynapses of groups in the external network if the connection is external
4944  if (isExternal) {
4945  targetGrp.gGrpId = grpSrc; // the other fields do not matter
4946  grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
4947  assert(grpIt != groupPartitionLists[externalNetId].end());
4948  grpIt->numPostSynapses += connIt->numberOfConnections;
4949 
4950  targetGrp.gGrpId = grpDest; // the other fields do not matter
4951  grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
4952  assert(grpIt != groupPartitionLists[externalNetId].end());
4953  grpIt->numPreSynapses += connIt->numberOfConnections;
4954  }
4955 
4956 
4957 }
4958 #endif
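One reason the multi-threaded path above is flagged as incorrect is that drand48()/rand() are not thread safe, and the gPostN % 10 placeholder removes the randomness altogether. A common workaround is one generator per thread; the sketch below shows that idea under the assumption that per-thread C++11 generators are acceptable here. It is not the fix adopted by CARLsim, and seeding each thread independently would also give up the simulator's single-seed reproducibility.

// thread-local RNG sketch: each worker owns its own generator, so the draw needs no lock
#include <cstdio>
#include <random>
#include <thread>
#include <vector>

double threadSafeRand01() {
	// one independently seeded generator per thread
	thread_local std::mt19937 gen(std::random_device{}());
	thread_local std::uniform_real_distribution<double> dist(0.0, 1.0);
	return dist(gen);
}

int main() {
	std::vector<std::thread> pool;
	for (int t = 0; t < 4; t++)
		pool.emplace_back([t]() {
			printf("thread %d drew %.3f\n", t, threadSafeRand01());
		});
	for (auto& th : pool)
		th.join();
	return 0;
}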
4959 
4960 // FIXME: rewrite the user-defined callback mechanism
4961 // user-defined functions are called here...
4962 // This is where the user-defined callback function is invoked. -- KDC
4963 void SNN::connectUserDefined(int netId, std::list<ConnectConfig>::iterator connIt, bool isExternal) {
4964  int grpSrc = connIt->grpSrc;
4965  int grpDest = connIt->grpDest;
4966  int externalNetId = -1;
4967 
4968  if (isExternal) {
4969  externalNetId = groupConfigMDMap[grpDest].netId;
4970  assert(netId != externalNetId);
4971  }
4972 
4973  connIt->maxDelay = 0;
4974  int preStartN = groupConfigMDMap[grpSrc].gStartN;
4975  int postStartN = groupConfigMDMap[grpDest].gStartN;
4976  for (int pre_nid = groupConfigMDMap[grpSrc].gStartN; pre_nid <= groupConfigMDMap[grpSrc].gEndN; pre_nid++) {
4977  //Point3D loc_pre = getNeuronLocation3D(pre_nid); // 3D coordinates of i
4978  for (int post_nid = groupConfigMDMap[grpDest].gStartN; post_nid <= groupConfigMDMap[grpDest].gEndN; post_nid++) {
4979  float weight, maxWt, delay;
4980  bool connected;
4981 
4982  connIt->conn->connect(this, grpSrc, pre_nid - preStartN, grpDest, post_nid - postStartN, weight, maxWt, delay, connected);
4983  if (connected) {
4984  assert(delay >= 1);
4985  assert(delay <= MAX_SYN_DELAY);
4986  assert(abs(weight) <= abs(maxWt));
4987 
4988  if (GET_FIXED_PLASTIC(connIt->connProp) == SYN_FIXED)
4989  maxWt = weight;
4990 
4991  if (fabs(maxWt) > connIt->maxWt)
4992  connIt->maxWt = fabs(maxWt);
4993 
4994  if (delay > connIt->maxDelay)
4995  connIt->maxDelay = delay;
4996 
4997  connectNeurons(netId, grpSrc, grpDest, pre_nid, post_nid, connIt->connId, weight, maxWt, delay, externalNetId);
4998  connIt->numberOfConnections++;
4999  }
5000  }
5001  }
5002 
5003  std::list<GroupConfigMD>::iterator grpIt;
5004  GroupConfigMD targetGrp;
5005 
5006  // update numPostSynapses and numPreSynapses of groups in the local network
5007  targetGrp.gGrpId = grpSrc; // the other fields do not matter
5008  grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
5009  assert(grpIt != groupPartitionLists[netId].end());
5010  grpIt->numPostSynapses += connIt->numberOfConnections;
5011 
5012  targetGrp.gGrpId = grpDest; // the other fields do not matter
5013  grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
5014  assert(grpIt != groupPartitionLists[netId].end());
5015  grpIt->numPreSynapses += connIt->numberOfConnections;
5016 
5017  // also update numPostSynapses and numPreSynapses of groups in the external network if the connection is external
5018  if (isExternal) {
5019  targetGrp.gGrpId = grpSrc; // the other fields do not matter
5020  grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
5021  assert(grpIt != groupPartitionLists[externalNetId].end());
5022  grpIt->numPostSynapses += connIt->numberOfConnections;
5023 
5024  targetGrp.gGrpId = grpDest; // the other fields do not matter
5025  grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
5026  assert(grpIt != groupPartitionLists[externalNetId].end());
5027  grpIt->numPreSynapses += connIt->numberOfConnections;
5028  }
5029 }
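// Illustrative sketch (added for clarity, not part of the original source): a minimal
// user-side connection callback of the kind invoked by connectUserDefined() above.
// The exact base class and signature live in callback.h (ConnectionGenerator); the
// class name and the weight/delay values below are hypothetical.
//
//   class OneToOneGen : public ConnectionGenerator {
//   public:
//     void connect(CARLsim* sim, int srcGrp, int i, int destGrp, int j,
//                  float& weight, float& maxWt, float& delay, bool& connected) {
//       connected = (i == j);          // connect neurons with matching indices only
//       weight = 0.5f; maxWt = 1.0f;   // |weight| must not exceed |maxWt| (see asserts above)
//       delay = 1.0f;                  // 1 <= delay <= MAX_SYN_DELAY (in ms)
//     }
//   };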
5030 
5032 //void SNN::connectFull(short int connId) {
5033 // int grpSrc = connectConfigMap[connId].grpSrc;
5034 // int grpDest = connectConfigMap[connId].grpDest;
5035 // bool noDirect = (connectConfigMap[connId].type == CONN_FULL_NO_DIRECT);
5036 //
5037 // // rebuild struct for easier handling
5038 // RadiusRF radius(connectConfigMap[connId].radX, connectConfigMap[connId].radY, connectConfigMap[connId].radZ);
5039 //
5040 // for(int i = groupConfigMap[grpSrc].StartN; i <= groupConfigMap[grpSrc].EndN; i++) {
5041 // Point3D loc_i = getNeuronLocation3D(i); // 3D coordinates of i
5042 // for(int j = groupConfigMap[grpDest].StartN; j <= groupConfigMap[grpDest].EndN; j++) { // j: the temp neuron id
5043 // // if flag is set, don't connect direct connections
5044 // if((noDirect) && (i - groupConfigMap[grpSrc].StartN) == (j - groupConfigMap[grpDest].StartN))
5045 // continue;
5046 //
5047 // // check whether pre-neuron location is in RF of post-neuron
5048 // Point3D loc_j = getNeuronLocation3D(j); // 3D coordinates of j
5049 // if (!isPoint3DinRF(radius, loc_i, loc_j))
5050 // continue;
5051 //
5052 // //uint8_t dVal = info->minDelay + (int)(0.5 + (drand48() * (info->maxDelay - info->minDelay)));
5053 // uint8_t dVal = connectConfigMap[connId].minDelay + rand() % (connectConfigMap[connId].maxDelay - connectConfigMap[connId].minDelay + 1);
5054 // assert((dVal >= connectConfigMap[connId].minDelay) && (dVal <= connectConfigMap[connId].maxDelay));
5055 // float synWt = generateWeight(connectConfigMap[connId].connProp, connectConfigMap[connId].initWt, connectConfigMap[connId].maxWt, i, grpSrc);
5056 //
5057 // setConnection(grpSrc, grpDest, i, j, synWt, connectConfigMap[connId].maxWt, dVal, connectConfigMap[connId].connProp, connId);// info->connId);
5058 // connectConfigMap[connId].numberOfConnections++;
5059 // }
5060 // }
5061 //
5062 // groupInfo[grpSrc].sumPostConn += connectConfigMap[connId].numberOfConnections;
5063 // groupInfo[grpDest].sumPreConn += connectConfigMap[connId].numberOfConnections;
5064 //}
5065 
5066 //void SNN::connectGaussian(short int connId) {
5067 // // rebuild struct for easier handling
5068 // // adjust with sqrt(2) in order to make the Gaussian kernel depend on 2*sigma^2
5069 // RadiusRF radius(connectConfigMap[connId].radX, connectConfigMap[connId].radY, connectConfigMap[connId].radZ);
5070 //
5071 // // in case pre and post have different Grid3D sizes: scale pre to the grid size of post
5072 // int grpSrc = connectConfigMap[connId].grpSrc;
5073 // int grpDest = connectConfigMap[connId].grpDest;
5074 // Grid3D grid_i = getGroupGrid3D(grpSrc);
5075 // Grid3D grid_j = getGroupGrid3D(grpDest);
5076 // Point3D scalePre = Point3D(grid_j.numX, grid_j.numY, grid_j.numZ) / Point3D(grid_i.numX, grid_i.numY, grid_i.numZ);
5077 //
5078 // for(int i = groupConfigMap[grpSrc].StartN; i <= groupConfigMap[grpSrc].EndN; i++) {
5079 // Point3D loc_i = getNeuronLocation3D(i)*scalePre; // i: adjusted 3D coordinates
5080 //
5081 // for(int j = groupConfigMap[grpDest].StartN; j <= groupConfigMap[grpDest].EndN; j++) { // j: the temp neuron id
5082 // // check whether pre-neuron location is in RF of post-neuron
5083 // Point3D loc_j = getNeuronLocation3D(j); // 3D coordinates of j
5084 //
5085 // // make sure point is in RF
5086 // double rfDist = getRFDist3D(radius,loc_i,loc_j);
5087 // if (rfDist < 0.0 || rfDist > 1.0)
5088 // continue;
5089 //
5090 // // if rfDist is valid, it returns a number between 0 and 1
5091 // // we want these numbers to fit to Gaussian weigths, so that rfDist=0 corresponds to max Gaussian weight
5092 // // and rfDist=1 corresponds to 0.1 times max Gaussian weight
5093 // // so we're looking at gauss = exp(-a*rfDist), where a such that exp(-a)=0.1
5094 // // solving for a, we find that a = 2.3026
5095 // double gauss = exp(-2.3026*rfDist);
5096 // if (gauss < 0.1)
5097 // continue;
5098 //
5099 // if (drand48() < connectConfigMap[connId].p) {
5100 // uint8_t dVal = connectConfigMap[connId].minDelay + rand() % (connectConfigMap[connId].maxDelay - connectConfigMap[connId].minDelay + 1);
5101 // assert((dVal >= connectConfigMap[connId].minDelay) && (dVal <= connectConfigMap[connId].maxDelay));
5102 // float synWt = gauss * connectConfigMap[connId].initWt; // scale weight according to gauss distance
5103 // setConnection(grpSrc, grpDest, i, j, synWt, connectConfigMap[connId].maxWt, dVal, connectConfigMap[connId].connProp, connId);//info->connId);
5104 // connectConfigMap[connId].numberOfConnections++;
5105 // }
5106 // }
5107 // }
5108 //
5109 // groupInfo[grpSrc].sumPostConn += connectConfigMap[connId].numberOfConnections;
5110 // groupInfo[grpDest].sumPreConn += connectConfigMap[connId].numberOfConnections;
5111 //}
5112 //
5113 //void SNN::connectOneToOne(short int connId) {
5114 // int grpSrc = connectConfigMap[connId].grpSrc;
5115 // int grpDest = connectConfigMap[connId].grpDest;
5116 // assert( groupConfigMap[grpDest].SizeN == groupConfigMap[grpSrc].SizeN );
5117 //
5118 // // NOTE: RadiusRF does not make a difference here: ignore
5119 // for(int nid=groupConfigMap[grpSrc].StartN,j=groupConfigMap[grpDest].StartN; nid<=groupConfigMap[grpSrc].EndN; nid++, j++) {
5120 // uint8_t dVal = connectConfigMap[connId].minDelay + rand() % (connectConfigMap[connId].maxDelay - connectConfigMap[connId].minDelay + 1);
5121 // assert((dVal >= connectConfigMap[connId].minDelay) && (dVal <= connectConfigMap[connId].maxDelay));
5122 // float synWt = generateWeight(connectConfigMap[connId].connProp, connectConfigMap[connId].initWt, connectConfigMap[connId].maxWt, nid, grpSrc);
5123 // setConnection(grpSrc, grpDest, nid, j, synWt, connectConfigMap[connId].maxWt, dVal, connectConfigMap[connId].connProp, connId);//info->connId);
5124 // connectConfigMap[connId].numberOfConnections++;
5125 // }
5126 //
5127 // groupInfo[grpSrc].sumPostConn += connectConfigMap[connId].numberOfConnections;
5128 // groupInfo[grpDest].sumPreConn += connectConfigMap[connId].numberOfConnections;
5129 //}
5130 //
5132 //void SNN::connectRandom(short int connId) {
5133 // int grpSrc = connectConfigMap[connId].grpSrc;
5134 // int grpDest = connectConfigMap[connId].grpDest;
5135 //
5136 // // rebuild struct for easier handling
5137 // RadiusRF radius(connectConfigMap[connId].radX, connectConfigMap[connId].radY, connectConfigMap[connId].radZ);
5138 //
5139 // for(int pre_nid = groupConfigMap[grpSrc].StartN; pre_nid <= groupConfigMap[grpSrc].EndN; pre_nid++) {
5140 // Point3D loc_pre = getNeuronLocation3D(pre_nid); // 3D coordinates of i
5141 // for(int post_nid = groupConfigMap[grpDest].StartN; post_nid <= groupConfigMap[grpDest].EndN; post_nid++) {
5142 // // check whether pre-neuron location is in RF of post-neuron
5143 // Point3D loc_post = getNeuronLocation3D(post_nid); // 3D coordinates of j
5144 // if (!isPoint3DinRF(radius, loc_pre, loc_post))
5145 // continue;
5146 //
5147 // if (drand48() < connectConfigMap[connId].p) {
5148 // //uint8_t dVal = info->minDelay + (int)(0.5+(drand48()*(info->maxDelay-info->minDelay)));
5149 // uint8_t dVal = connectConfigMap[connId].minDelay + rand() % (connectConfigMap[connId].maxDelay - connectConfigMap[connId].minDelay + 1);
5150 // assert((dVal >= connectConfigMap[connId].minDelay) && (dVal <= connectConfigMap[connId].maxDelay));
5151 // float synWt = generateWeight(connectConfigMap[connId].connProp, connectConfigMap[connId].initWt, connectConfigMap[connId].maxWt, pre_nid, grpSrc);
5152 // setConnection(grpSrc, grpDest, pre_nid, post_nid, synWt, connectConfigMap[connId].maxWt, dVal, connectConfigMap[connId].connProp, connId); //info->connId);
5153 // connectConfigMap[connId].numberOfConnections++;
5154 // }
5155 // }
5156 // }
5157 //
5158 // groupInfo[grpSrc].sumPostConn += connectConfigMap[connId].numberOfConnections;
5159 // groupInfo[grpDest].sumPreConn += connectConfigMap[connId].numberOfConnections;
5160 //}
5161 //
5164 //void SNN::connectUserDefined(short int connId) {
5165 // int grpSrc = connectConfigMap[connId].grpSrc;
5166 // int grpDest = connectConfigMap[connId].grpDest;
5167 // connectConfigMap[connId].maxDelay = 0;
5168 // for(int nid=groupConfigMap[grpSrc].StartN; nid<=groupConfigMap[grpSrc].EndN; nid++) {
5169 // for(int nid2=groupConfigMap[grpDest].StartN; nid2 <= groupConfigMap[grpDest].EndN; nid2++) {
5170 // int srcId = nid - groupConfigMap[grpSrc].StartN;
5171 // int destId = nid2 - groupConfigMap[grpDest].StartN;
5172 // float weight, maxWt, delay;
5173 // bool connected;
5174 //
5175 // connectConfigMap[connId].conn->connect(this, grpSrc, srcId, grpDest, destId, weight, maxWt, delay, connected);
5176 // if(connected) {
5177 // if (GET_FIXED_PLASTIC(connectConfigMap[connId].connProp) == SYN_FIXED)
5178 // maxWt = weight;
5179 //
5180 // connectConfigMap[connId].maxWt = maxWt;
5181 //
5182 // assert(delay >= 1);
5183 // assert(delay <= MAX_SYN_DELAY);
5184 // assert(abs(weight) <= abs(maxWt));
5185 //
5186 // // adjust the sign of the weight based on inh/exc connection
5187 // weight = isExcitatoryGroup(grpSrc) ? fabs(weight) : -1.0*fabs(weight);
5188 // maxWt = isExcitatoryGroup(grpSrc) ? fabs(maxWt) : -1.0*fabs(maxWt);
5189 //
5190 // setConnection(grpSrc, grpDest, nid, nid2, weight, maxWt, delay, connectConfigMap[connId].connProp, connId);// info->connId);
5191 // connectConfigMap[connId].numberOfConnections++;
5192 // if(delay > connectConfigMap[connId].maxDelay) {
5193 // connectConfigMap[connId].maxDelay = delay;
5194 // }
5195 // }
5196 // }
5197 // }
5198 //
5199 // groupInfo[grpSrc].sumPostConn += connectConfigMap[connId].numberOfConnections;
5200 // groupInfo[grpDest].sumPreConn += connectConfigMap[connId].numberOfConnections;
5201 //}
5202 
5203 void SNN::deleteRuntimeData() {
5204  // FIXME: assert that the simulation uses the GPU first
5205  // wait for kernels to complete
5206 #ifndef __NO_CUDA__
5207  CUDA_CHECK_ERRORS(cudaThreadSynchronize());
5208 #endif
5209 
5210  #ifndef __NO_PTHREADS__ // POSIX
5211  pthread_t threads[numCores + 1]; // +1 array slot in case numCores == 0; works, but bad practice
5212  cpu_set_t cpus;
5213  ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
5214  int threadCount = 0;
5215  #endif
5216 
5217  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
5218  if (!groupPartitionLists[netId].empty()) {
5219  if (netId < CPU_RUNTIME_BASE) // GPU runtime
5220  deleteRuntimeData_GPU(netId);
5221  else{ // CPU runtime
5222  #ifdef __NO_PTHREADS__
5223  deleteRuntimeData_CPU(netId);
5224  #else // Linux or MAC
5225  pthread_attr_t attr;
5226  pthread_attr_init(&attr);
5227  CPU_ZERO(&cpus);
5228  CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
5229  pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);
5230 
5231  argsThreadRoutine[threadCount].snn_pointer = this;
5232  argsThreadRoutine[threadCount].netId = netId;
5233  argsThreadRoutine[threadCount].lGrpId = 0;
5234  argsThreadRoutine[threadCount].startIdx = 0;
5235  argsThreadRoutine[threadCount].endIdx = 0;
5236  argsThreadRoutine[threadCount].GtoLOffset = 0;
5237 
5238  pthread_create(&threads[threadCount], &attr, &SNN::helperDeleteRuntimeData_CPU, (void*)&argsThreadRoutine[threadCount]);
5239  pthread_attr_destroy(&attr);
5240  threadCount++;
5241  #endif
5242  }
5243  }
5244  }
5245 
5246  #ifndef __NO_PTHREADS__ // POSIX
5247  // join all the threads
5248  for (int i=0; i<threadCount; i++){
5249  pthread_join(threads[i], NULL);
5250  }
5251  #endif
5252 
5253 #ifndef __NO_CUDA__
5254  CUDA_DELETE_TIMER(timer);
5255 #endif
5256 }
5257 
5258 // delete all objects (CPU and GPU side)
5259 void SNN::deleteObjects() {
5260  if (simulatorDeleted)
5261  return;
5262 
5263  printSimSummary();
5264 
5265  // deallocate objects
5266  resetMonitors(true);
5267  resetConnectionConfigs(true);
5268 #ifdef LN_I_CALC_TYPES
5269  resetGroupConfigs(true);
5270 #endif
5271 
5272 
5273  // delete manager runtime data
5274  deleteManagerRuntimeData();
5275 
5276  deleteRuntimeData();
5277 
5278  // fclose file streams, unless in custom mode
5279  if (loggerMode_ != CUSTOM) {
5280  // don't fclose stdout or stderr; otherwise they would stay closed for the rest of the process
5281  if (fpInf_ != NULL && fpInf_ != stdout && fpInf_ != stderr)
5282  fclose(fpInf_);
5283  if (fpErr_ != NULL && fpErr_ != stdout && fpErr_ != stderr)
5284  fclose(fpErr_);
5285  if (fpDeb_ != NULL && fpDeb_ != stdout && fpDeb_ != stderr)
5286  fclose(fpDeb_);
5287  if (fpLog_ != NULL && fpLog_ != stdout && fpLog_ != stderr)
5288  fclose(fpLog_);
5289  }
5290 
5291  simulatorDeleted = true;
5292 }
5293 
5294 void SNN::findMaxNumSynapsesGroups(int* _maxNumPostSynGrp, int* _maxNumPreSynGrp) {
5295  *_maxNumPostSynGrp = 0;
5296  *_maxNumPreSynGrp = 0;
5297 
5298  // scan all the groups and find the required information
5299  for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
5300  // find the values for maximum postsynaptic length
5301  // and maximum pre-synaptic length
5302  if (groupConfigMDMap[gGrpId].numPostSynapses > *_maxNumPostSynGrp)
5303  *_maxNumPostSynGrp = groupConfigMDMap[gGrpId].numPostSynapses;
5304  if (groupConfigMDMap[gGrpId].numPreSynapses > *_maxNumPreSynGrp)
5305  *_maxNumPreSynGrp = groupConfigMDMap[gGrpId].numPreSynapses;
5306  }
5307 }
5308 
5309 void SNN::findMaxNumSynapsesNeurons(int _netId, int& _maxNumPostSynN, int& _maxNumPreSynN) {
5310  int *tempNpre, *tempNpost;
5311  int nSrc, nDest, numNeurons;
5312  std::map<int, int> globalToLocalOffset;
5313 
5314  numNeurons = networkConfigs[_netId].numNAssigned;
5315  tempNpre = new int[numNeurons];
5316  tempNpost = new int[numNeurons];
5317  memset(tempNpre, 0, sizeof(int) * numNeurons);
5318  memset(tempNpost, 0, sizeof(int) * numNeurons);
5319 
5320  // load offset between global neuron id and local neuron id
5321  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[_netId].begin(); grpIt != groupPartitionLists[_netId].end(); grpIt++) {
5322  globalToLocalOffset[grpIt->gGrpId] = grpIt->GtoLOffset;
5323  }
5324 
5325  // calculate number of pre- and post- connections of each neuron
5326 #ifdef LN_FIX_CONNLIST_
5327  for (std::vector<ConnectionInfo>::iterator connIt = connectionLists[_netId].begin(); connIt != connectionLists[_netId].end(); connIt++) {
5328 #else
5329  for (std::list<ConnectionInfo>::iterator connIt = connectionLists[_netId].begin(); connIt != connectionLists[_netId].end(); connIt++) {
5330 #endif
5331  nSrc = connIt->nSrc + globalToLocalOffset[connIt->grpSrc];
5332  nDest = connIt->nDest + globalToLocalOffset[connIt->grpDest];
5333  assert(nSrc < numNeurons); assert(nDest < numNeurons);
5334  tempNpost[nSrc]++;
5335  tempNpre[nDest]++;
5336  }
5337 
5338  // find out the maximum number of pre- and post- connections among neurons in a local network
5339  _maxNumPostSynN = 0;
5340  _maxNumPreSynN = 0;
5341  for (int nId = 0; nId < networkConfigs[_netId].numN; nId++) {
5342  if (tempNpost[nId] > _maxNumPostSynN) _maxNumPostSynN = tempNpost[nId];
5343  if (tempNpre[nId] > _maxNumPreSynN) _maxNumPreSynN = tempNpre[nId];
5344  }
5345 
5346  delete [] tempNpre;
5347  delete [] tempNpost;
5348 }
5349 
5350 void SNN::findMaxSpikesD1D2(int _netId, unsigned int& _maxSpikesD1, unsigned int& _maxSpikesD2) {
5351  _maxSpikesD1 = 0; _maxSpikesD2 = 0;
5352  for(std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[_netId].begin(); grpIt != groupPartitionLists[_netId].end(); grpIt++) {
5353  if (grpIt->maxOutgoingDelay == 1)
5354  _maxSpikesD1 += (groupConfigMap[grpIt->gGrpId].numN * NEURON_MAX_FIRING_RATE);
5355  else
5356  _maxSpikesD2 += (groupConfigMap[grpIt->gGrpId].numN * NEURON_MAX_FIRING_RATE);
5357  }
5358 }
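// Sizing note (added for clarity, assuming NEURON_MAX_FIRING_RATE is the per-neuron
// firing-rate bound used for buffer allocation): a group of 100 neurons with
// maxOutgoingDelay == 1 contributes 100 * NEURON_MAX_FIRING_RATE entries to
// _maxSpikesD1; all other groups are accounted to _maxSpikesD2.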
5359 
5360 void SNN::findNumN(int _netId, int& _numN, int& _numNExternal, int& _numNAssigned,
5361  int& _numNReg, int& _numNExcReg, int& _numNInhReg,
5362  int& _numNPois, int& _numNExcPois, int& _numNInhPois) {
5363  _numN = 0; _numNExternal = 0; _numNAssigned = 0;
5364  _numNReg = 0; _numNExcReg = 0; _numNInhReg = 0;
5365  _numNPois = 0; _numNExcPois = 0; _numNInhPois = 0;
5366  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[_netId].begin(); grpIt != groupPartitionLists[_netId].end(); grpIt++) {
5367  int sizeN = groupConfigMap[grpIt->gGrpId].numN;
5368  unsigned int type = groupConfigMap[grpIt->gGrpId].type;
5369  if (IS_EXCITATORY_TYPE(type) && (type & POISSON_NEURON) && grpIt->netId == _netId) {
5370  _numN += sizeN; _numNPois += sizeN; _numNExcPois += sizeN;
5371  } else if (IS_INHIBITORY_TYPE(type) && (type & POISSON_NEURON) && grpIt->netId == _netId) {
5372  _numN += sizeN; _numNPois += sizeN; _numNInhPois += sizeN;
5373  } else if (IS_EXCITATORY_TYPE(type) && !(type & POISSON_NEURON) && grpIt->netId == _netId) {
5374  _numN += sizeN; _numNReg += sizeN; _numNExcReg += sizeN;
5375  } else if (IS_INHIBITORY_TYPE(type) && !(type & POISSON_NEURON) && grpIt->netId == _netId) {
5376  _numN += sizeN; _numNReg += sizeN; _numNInhReg += sizeN;
5377  } else if (grpIt->netId != _netId) {
5378  _numNExternal += sizeN;
5379  } else {
5380  KERNEL_ERROR("Can't find category for the group [%d] ", grpIt->gGrpId);
5382  }
5383  _numNAssigned += sizeN;
5384  }
5385 
5386  assert(_numNReg == _numNExcReg + _numNInhReg);
5387  assert(_numNPois == _numNExcPois + _numNInhPois);
5388  assert(_numN == _numNReg + _numNPois);
5389  assert(_numNAssigned == _numN + _numNExternal);
5390 }
5391 
5392 void SNN::findNumNSpikeGenAndOffset(int _netId) {
5393  networkConfigs[_netId].numNSpikeGen = 0;
5394 
5395  for(int lGrpId = 0; lGrpId < networkConfigs[_netId].numGroups; lGrpId++) {
5396  if (_netId == groupConfigs[_netId][lGrpId].netId && groupConfigs[_netId][lGrpId].isSpikeGenerator && groupConfigs[_netId][lGrpId].isSpikeGenFunc) {
5397  groupConfigs[_netId][lGrpId].Noffset = networkConfigs[_netId].numNSpikeGen;
5398  networkConfigs[_netId].numNSpikeGen += groupConfigs[_netId][lGrpId].numN;
5399  }
5400  }
5401 
5402  assert(networkConfigs[_netId].numNSpikeGen <= networkConfigs[_netId].numNPois);
5403 }
5404 
5405 void SNN::findNumSynapsesNetwork(int _netId, int& _numPostSynNet, int& _numPreSynNet) {
5406  _numPostSynNet = 0;
5407  _numPreSynNet = 0;
5408 
5409  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[_netId].begin(); grpIt != groupPartitionLists[_netId].end(); grpIt++) {
5410  _numPostSynNet += grpIt->numPostSynapses;
5411  _numPreSynNet += grpIt->numPreSynapses;
5412  assert(_numPostSynNet < INT_MAX);
5413  assert(_numPreSynNet < INT_MAX);
5414  }
5415 
5416  assert(_numPreSynNet == _numPostSynNet);
5417 }
5418 
5419 void SNN::fetchGroupState(int netId, int lGrpId) {
5420  if (netId < CPU_RUNTIME_BASE)
5421  copyGroupState(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false);
5422  else
5423  copyGroupState(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false);
5424 }
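// Note on the dispatch convention used by this and the following fetch*() helpers
// (summary added for clarity): netId values below CPU_RUNTIME_BASE denote GPU
// runtimes, so the overload taking cudaMemcpyDeviceToHost is used; netId values at
// or above CPU_RUNTIME_BASE denote CPU runtimes, which call the plain host-side
// copy overload.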
5425 
5426 void SNN::fetchWeightState(int netId, int lGrpId) {
5427  if (netId < CPU_RUNTIME_BASE)
5428  copyWeightState(netId, lGrpId, cudaMemcpyDeviceToHost);
5429  else
5430  copyWeightState(netId, lGrpId);
5431 }
5432 
5438 void SNN::fetchNeuronSpikeCount (int gGrpId) {
5439  if (gGrpId == ALL) {
5440  for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
5441  fetchNeuronSpikeCount(gGrpId);
5442  }
5443  } else {
5444  int netId = groupConfigMDMap[gGrpId].netId;
5445  int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
5446  int LtoGOffset = groupConfigMDMap[gGrpId].LtoGOffset;
5447 
5448  if (netId < CPU_RUNTIME_BASE)
5449  copyNeuronSpikeCount(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, LtoGOffset);
5450  else
5451  copyNeuronSpikeCount(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false, LtoGOffset);
5452  }
5453 }
5454 
5455 void SNN::fetchSTPState(int gGrpId) {
5456 }
5457 
5463 void SNN::fetchConductanceAMPA(int gGrpId) {
5464  if (gGrpId == ALL) {
5465  for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
5466  fetchConductanceAMPA(gGrpId);
5467  }
5468  } else {
5469  int netId = groupConfigMDMap[gGrpId].netId;
5470  int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
5471  int LtoGOffset = groupConfigMDMap[gGrpId].LtoGOffset;
5472 
5473  if (netId < CPU_RUNTIME_BASE)
5474  copyConductanceAMPA(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, LtoGOffset);
5475  else
5476  copyConductanceAMPA(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false, LtoGOffset);
5477  }
5478 }
5479 
5485 void SNN::fetchConductanceNMDA(int gGrpId) {
5486  if (gGrpId == ALL) {
5487  for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
5488  fetchConductanceNMDA(gGrpId);
5489  }
5490  } else {
5491  int netId = groupConfigMDMap[gGrpId].netId;
5492  int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
5493  int LtoGOffset = groupConfigMDMap[gGrpId].LtoGOffset;
5494 
5495  if (netId < CPU_RUNTIME_BASE)
5496  copyConductanceNMDA(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, LtoGOffset);
5497  else
5498  copyConductanceNMDA(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false, LtoGOffset);
5499  }
5500 }
5501 
5507 void SNN::fetchConductanceGABAa(int gGrpId) {
5508  if (gGrpId == ALL) {
5509  for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
5510  fetchConductanceGABAa(gGrpId);
5511  }
5512  } else {
5513  int netId = groupConfigMDMap[gGrpId].netId;
5514  int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
5515  int LtoGOffset = groupConfigMDMap[gGrpId].LtoGOffset;
5516 
5517  if (netId < CPU_RUNTIME_BASE)
5518  copyConductanceGABAa(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, LtoGOffset);
5519  else
5520  copyConductanceGABAa(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false, LtoGOffset);
5521  }
5522 }
5523 
5529 void SNN::fetchConductanceGABAb(int gGrpId) {
5530  if (gGrpId == ALL) {
5531  for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
5532  fetchConductanceGABAb(gGrpId);
5533  }
5534  } else {
5535  int netId = groupConfigMDMap[gGrpId].netId;
5536  int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
5537  int LtoGOffset = groupConfigMDMap[gGrpId].LtoGOffset;
5538 
5539  if (netId < CPU_RUNTIME_BASE)
5540  copyConductanceGABAb(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, LtoGOffset);
5541  else
5542  copyConductanceGABAb(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false, LtoGOffset);
5543  }
5544 }
5545 
5546 
5547 void SNN::fetchGrpIdsLookupArray(int netId) {
5548  if (netId < CPU_RUNTIME_BASE)
5549  copyGrpIdsLookupArray(netId, cudaMemcpyDeviceToHost);
5550  else
5551  copyGrpIdsLookupArray(netId);
5552 }
5553 
5554 void SNN::fetchConnIdsLookupArray(int netId) {
5555  if (netId < CPU_RUNTIME_BASE)
5556  copyConnIdsLookupArray(netId, cudaMemcpyDeviceToHost);
5557  else
5558  copyConnIdsLookupArray(netId);
5559 }
5560 
5561 void SNN::fetchLastSpikeTime(int netId) {
5562  if (netId < CPU_RUNTIME_BASE)
5563  copyLastSpikeTime(netId, cudaMemcpyDeviceToHost);
5564  else
5565  copyLastSpikeTime(netId);
5566 }
5567 
5571 void SNN::fetchCurSpike(int netId) {
5572  assert(netId < MAX_NET_PER_SNN);
5573 
5574  if (netId < CPU_RUNTIME_BASE) { // GPU runtime
5575  copyCurSpikes(netId, cudaMemcpyDeviceToHost);
5576  } else { // CPU runtime
5577  copyCurSpikes(netId);
5578  }
5579 }
5580 
5583 void SNN::fetchRandNum(int netId) {
5584  assert(netId < MAX_NET_PER_SNN);
5585 
5586  if (netId < CPU_RUNTIME_BASE) { // GPU runtime
5587  copyRandNum(netId, cudaMemcpyDeviceToHost);
5588  } else { // CPU runtime
5589  copyRandNum(netId);
5590  }
5591 }
5592 
5595 void SNN::fetchPoissonFireRate(int netId) {
5596  assert(netId < MAX_NET_PER_SNN);
5597 
5598  if (netId < CPU_RUNTIME_BASE) { // GPU runtime
5599  copyPoissonFireRate(netId, cudaMemcpyDeviceToHost);
5600  } else { // CPU runtime
5601  copyPoissonFireRate(netId);
5602  }
5603 }
5604 
5607 void SNN::fetchSpikeGenBits(int netId) {
5608  assert(netId < MAX_NET_PER_SNN);
5609 
5610  if (netId < CPU_RUNTIME_BASE) { // GPU runtime
5611  copySpikeGenBits(netId, cudaMemcpyDeviceToHost);
5612  } else { // CPU runtime
5613  copySpikeGenBits(netId);
5614  }
5615 }
5616 
5617 
5618 
5619 
5620 void SNN::fetchPreConnectionInfo(int netId) {
5621  if (netId < CPU_RUNTIME_BASE)
5622  copyPreConnectionInfo(netId, ALL, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false);
5623  else
5624  copyPreConnectionInfo(netId, ALL, &managerRuntimeData, &runtimeData[netId], false);
5625 }
5626 
5627 void SNN::fetchPostConnectionInfo(int netId) {
5628  if (netId < CPU_RUNTIME_BASE)
5629  copyPostConnectionInfo(netId, ALL, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false);
5630  else
5631  copyPostConnectionInfo(netId, ALL, &managerRuntimeData, &runtimeData[netId], false);
5632 }
5633 
5634 void SNN::fetchSynapseState(int netId) {
5635  if (netId < CPU_RUNTIME_BASE)
5636  copySynapseState(netId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false);
5637  else
5638  copySynapseState(netId, &managerRuntimeData, &runtimeData[netId], false);
5639 }
5640 
5641 
5645 void SNN::fetchNetworkSpikeCount() {
5646  unsigned int spikeCountD1, spikeCountD2, spikeCountExtD1, spikeCountExtD2;
5647 
5648  managerRuntimeData.spikeCountD1 = 0;
5649  managerRuntimeData.spikeCountD2 = 0;
5650  managerRuntimeData.spikeCountExtRxD2 = 0;
5651  managerRuntimeData.spikeCountExtRxD1 = 0;
5652  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
5653  if (!groupPartitionLists[netId].empty()) {
5654 
5655  if (netId < CPU_RUNTIME_BASE) {
5656  copyNetworkSpikeCount(netId, cudaMemcpyDeviceToHost,
5657  &spikeCountD1, &spikeCountD2,
5658  &spikeCountExtD1, &spikeCountExtD2);
5659  //printf("netId:%d, D1:%d/D2:%d, extD1:%d/D2:%d\n", netId, spikeCountD1, spikeCountD2, spikeCountExtD1, spikeCountExtD2);
5660  } else {
5661  copyNetworkSpikeCount(netId,
5662  &spikeCountD1, &spikeCountD2,
5663  &spikeCountExtD1, &spikeCountExtD2);
5664  //printf("netId:%d, D1:%d/D2:%d, extD1:%d/D2:%d\n", netId, spikeCountD1, spikeCountD2, spikeCountExtD1, spikeCountExtD2);
5665  }
5666 
5667  managerRuntimeData.spikeCountD2 += spikeCountD2 - spikeCountExtD2;
5668  managerRuntimeData.spikeCountD1 += spikeCountD1 - spikeCountExtD1;
5669  managerRuntimeData.spikeCountExtRxD2 += spikeCountExtD2;
5670  managerRuntimeData.spikeCountExtRxD1 += spikeCountExtD1;
5671  }
5672  }
5673 
5674  managerRuntimeData.spikeCount = managerRuntimeData.spikeCountD1 + managerRuntimeData.spikeCountD2;
5675 }
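// Accounting note (added for clarity): the per-network spikeCountD1/D2 values include
// spikes received from other networks, so the externally received counts
// (spikeCountExtD1/D2) are subtracted before accumulating the local totals and are
// tracked separately in spikeCountExtRxD1/D2; the grand total is the sum of the
// local D1 and D2 counts.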
5676 
5677 void SNN::fetchSpikeTables(int netId) {
5678  if (netId < CPU_RUNTIME_BASE)
5679  copySpikeTables(netId, cudaMemcpyDeviceToHost);
5680  else
5681  copySpikeTables(netId);
5682 }
5683 
5684 void SNN::fetchNeuronStateBuffer(int netId, int lGrpId) {
5685  if (netId < CPU_RUNTIME_BASE)
5686  copyNeuronStateBuffer(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false);
5687  else
5688  copyNeuronStateBuffer(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false);
5689 }
5690 
5691 void SNN::fetchExtFiringTable(int netId) {
5692  assert(netId < MAX_NET_PER_SNN);
5693 
5694  if (netId < CPU_RUNTIME_BASE) { // GPU runtime
5695  copyExtFiringTable(netId, cudaMemcpyDeviceToHost);
5696  } else { // CPU runtime
5697  copyExtFiringTable(netId);
5698  }
5699 }
5700 
5701 void SNN::fetchTimeTable(int netId) {
5702  assert(netId < MAX_NET_PER_SNN);
5703 
5704  if (netId < CPU_RUNTIME_BASE) { // GPU runtime
5705  copyTimeTable(netId, cudaMemcpyDeviceToHost);
5706  } else {
5707  copyTimeTable(netId, true);
5708  }
5709 }
5710 
5711 void SNN::writeBackTimeTable(int netId) {
5712  assert(netId < MAX_NET_PER_SNN);
5713 
5714  if (netId < CPU_RUNTIME_BASE) { // GPU runtime
5715  copyTimeTable(netId, cudaMemcpyHostToDevice);
5716  } else {
5717  copyTimeTable(netId, false);
5718  }
5719 }
5720 
5721 void SNN::transferSpikes(void* dest, int destNetId, void* src, int srcNetId, int size) {
5722 #ifndef __NO_CUDA__
5723  if (srcNetId < CPU_RUNTIME_BASE && destNetId < CPU_RUNTIME_BASE) {
5724  checkAndSetGPUDevice(destNetId);
5725  CUDA_CHECK_ERRORS(cudaMemcpyPeer(dest, destNetId, src, srcNetId, size));
5726  } else if (srcNetId >= CPU_RUNTIME_BASE && destNetId < CPU_RUNTIME_BASE) {
5727  checkAndSetGPUDevice(destNetId);
5728  CUDA_CHECK_ERRORS(cudaMemcpy(dest, src, size, cudaMemcpyHostToDevice));
5729  } else if (srcNetId < CPU_RUNTIME_BASE && destNetId >= CPU_RUNTIME_BASE) {
5730  checkAndSetGPUDevice(srcNetId);
5731  CUDA_CHECK_ERRORS(cudaMemcpy(dest, src, size, cudaMemcpyDeviceToHost));
5732  } else if(srcNetId >= CPU_RUNTIME_BASE && destNetId >= CPU_RUNTIME_BASE) {
5733  memcpy(dest, src, size);
5734  }
5735 #else
5736  assert(srcNetId >= CPU_RUNTIME_BASE && destNetId >= CPU_RUNTIME_BASE);
5737  memcpy(dest, src, size);
5738 #endif
5739 }
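// Transfer-path summary (added for clarity): the four cases above map to
//   GPU -> GPU : cudaMemcpyPeer (direct device-to-device copy)
//   CPU -> GPU : cudaMemcpy with cudaMemcpyHostToDevice
//   GPU -> CPU : cudaMemcpy with cudaMemcpyDeviceToHost
//   CPU -> CPU : plain memcpy
// With CUDA disabled (__NO_CUDA__), only the CPU -> CPU path is legal.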
5740 
5741 void SNN::convertExtSpikesD2(int netId, int startIdx, int endIdx, int GtoLOffset) {
5742  if (netId < CPU_RUNTIME_BASE)
5743  convertExtSpikesD2_GPU(netId, startIdx, endIdx, GtoLOffset);
5744  else
5745  convertExtSpikesD2_CPU(netId, startIdx, endIdx, GtoLOffset);
5746 }
5747 
5748 void SNN::convertExtSpikesD1(int netId, int startIdx, int endIdx, int GtoLOffset) {
5749  if (netId < CPU_RUNTIME_BASE)
5750  convertExtSpikesD1_GPU(netId, startIdx, endIdx, GtoLOffset);
5751  else
5752  convertExtSpikesD1_CPU(netId, startIdx, endIdx, GtoLOffset);
5753 }
5754 
5755 void SNN::routeSpikes() {
5756  int firingTableIdxD2, firingTableIdxD1;
5757  int GtoLOffset;
5758 
5759  for (std::list<RoutingTableEntry>::iterator rteItr = spikeRoutingTable.begin(); rteItr != spikeRoutingTable.end(); rteItr++) {
5760  int srcNetId = rteItr->srcNetId;
5761  int destNetId = rteItr->destNetId;
5762 
5763  fetchExtFiringTable(srcNetId);
5764 
5765  fetchTimeTable(destNetId);
5766  firingTableIdxD2 = managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1];
5767  firingTableIdxD1 = managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1];
5768  //KERNEL_DEBUG("GPU1 D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2);
5769  //printf("srcNetId %d,destNetId %d, D1:%d/D2:%d\n", srcNetId, destNetId, firingTableIdxD1, firingTableIdxD2);
5770 
5771  #ifndef __NO_PTHREADS__ // POSIX
5772  pthread_t threads[(2 * networkConfigs[srcNetId].numGroups) + 1]; // +1 array slot in case numGroups == 0; works, but bad practice
5773  cpu_set_t cpus;
5774  ThreadStruct argsThreadRoutine[(2 * networkConfigs[srcNetId].numGroups) + 1]; // same as above, +1 array size
5775  int threadCount = 0;
5776  #endif
5777 
5778  for (int lGrpId = 0; lGrpId < networkConfigs[srcNetId].numGroups; lGrpId++) {
5779  if (groupConfigs[srcNetId][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD2[lGrpId] > 0) {
5780  // search GtoLOffset of the neural group at destination local network
5781  bool isFound = false;
5782  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[destNetId].begin(); grpIt != groupPartitionLists[destNetId].end(); grpIt++) {
5783  if (grpIt->gGrpId == groupConfigs[srcNetId][lGrpId].gGrpId) {
5784  GtoLOffset = grpIt->GtoLOffset;
5785  isFound = true;
5786  break;
5787  }
5788  }
5789 
5790  if (isFound) {
5791  transferSpikes(runtimeData[destNetId].firingTableD2 + firingTableIdxD2, destNetId,
5792  managerRuntimeData.extFiringTableD2[lGrpId], srcNetId,
5793  sizeof(int) * managerRuntimeData.extFiringTableEndIdxD2[lGrpId]);
5794 
5795  if (destNetId < CPU_RUNTIME_BASE){
5796  convertExtSpikesD2_GPU(destNetId, firingTableIdxD2,
5797  firingTableIdxD2 + managerRuntimeData.extFiringTableEndIdxD2[lGrpId],
5798  GtoLOffset); // [StartIdx, EndIdx)
5799  }
5800  else{// CPU runtime
5801  #ifdef __NO_PTHREADS__
5802  convertExtSpikesD2_CPU(destNetId, firingTableIdxD2,
5803  firingTableIdxD2 + managerRuntimeData.extFiringTableEndIdxD2[lGrpId],
5804  GtoLOffset); // [StartIdx, EndIdx)
5805  #else // Linux or MAC
5806  pthread_attr_t attr;
5807  pthread_attr_init(&attr);
5808  CPU_ZERO(&cpus);
5809  CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
5810  pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);
5811 
5812  argsThreadRoutine[threadCount].snn_pointer = this;
5813  argsThreadRoutine[threadCount].netId = destNetId;
5814  argsThreadRoutine[threadCount].lGrpId = 0;
5815  argsThreadRoutine[threadCount].startIdx = firingTableIdxD2;
5816  argsThreadRoutine[threadCount].endIdx = firingTableIdxD2 + managerRuntimeData.extFiringTableEndIdxD2[lGrpId];
5817  argsThreadRoutine[threadCount].GtoLOffset = GtoLOffset;
5818 
5819  pthread_create(&threads[threadCount], &attr, &SNN::helperConvertExtSpikesD2_CPU, (void*)&argsThreadRoutine[threadCount]);
5820  pthread_attr_destroy(&attr);
5821  threadCount++;
5822  #endif
5823  }
5824 
5825  firingTableIdxD2 += managerRuntimeData.extFiringTableEndIdxD2[lGrpId];
5826  }
5827  }
5828 
5829  if (groupConfigs[srcNetId][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD1[lGrpId] > 0) {
5830  // search GtoLOffset of the neural group at destination local network
5831  bool isFound = false;
5832  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[destNetId].begin(); grpIt != groupPartitionLists[destNetId].end(); grpIt++) {
5833  if (grpIt->gGrpId == groupConfigs[srcNetId][lGrpId].gGrpId) {
5834  GtoLOffset = grpIt->GtoLOffset;
5835  isFound = true;
5836  break;
5837  }
5838  }
5839 
5840  if (isFound) {
5841  transferSpikes(runtimeData[destNetId].firingTableD1 + firingTableIdxD1, destNetId,
5842  managerRuntimeData.extFiringTableD1[lGrpId], srcNetId,
5843  sizeof(int) * managerRuntimeData.extFiringTableEndIdxD1[lGrpId]);
5844  if (destNetId < CPU_RUNTIME_BASE){
5845  convertExtSpikesD1_GPU(destNetId, firingTableIdxD1,
5846  firingTableIdxD1 + managerRuntimeData.extFiringTableEndIdxD1[lGrpId],
5847  GtoLOffset); // [StartIdx, EndIdx)
5848  }
5849  else{// CPU runtime
5850  #ifdef __NO_PTHREADS__
5851  convertExtSpikesD1_CPU(destNetId, firingTableIdxD1,
5852  firingTableIdxD1 + managerRuntimeData.extFiringTableEndIdxD1[lGrpId],
5853  GtoLOffset); // [StartIdx, EndIdx)
5854  #else // Linux or MAC
5855  pthread_attr_t attr;
5856  pthread_attr_init(&attr);
5857  CPU_ZERO(&cpus);
5858  CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
5859  pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);
5860 
5861  argsThreadRoutine[threadCount].snn_pointer = this;
5862  argsThreadRoutine[threadCount].netId = destNetId;
5863  argsThreadRoutine[threadCount].lGrpId = 0;
5864  argsThreadRoutine[threadCount].startIdx = firingTableIdxD1;
5865  argsThreadRoutine[threadCount].endIdx = firingTableIdxD1 + managerRuntimeData.extFiringTableEndIdxD1[lGrpId];
5866  argsThreadRoutine[threadCount].GtoLOffset = GtoLOffset;
5867 
5868  pthread_create(&threads[threadCount], &attr, &SNN::helperConvertExtSpikesD1_CPU, (void*)&argsThreadRoutine[threadCount]);
5869  pthread_attr_destroy(&attr);
5870  threadCount++;
5871  #endif
5872  }
5873  firingTableIdxD1 += managerRuntimeData.extFiringTableEndIdxD1[lGrpId];
5874  }
5875  }
5876  //KERNEL_DEBUG("GPU1 New D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2);
5877  }
5878 
5879  #ifndef __NO_PTHREADS__ // POSIX
5880  // join all the threads
5881  for (int i=0; i<threadCount; i++){
5882  pthread_join(threads[i], NULL);
5883  }
5884  #endif
5885 
5886  managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD2;
5887  managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD1;
5888  writeBackTimeTable(destNetId);
5889  }
5890 }
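// Routing summary (added for clarity): for every (srcNetId, destNetId) entry in
// spikeRoutingTable, the external firing tables of the source network are copied
// behind the current end of the destination's firingTableD1/D2 (transferSpikes),
// the copied neuron ids are shifted into the destination's local id space via
// GtoLOffset (convertExtSpikesD1/D2), and the destination's time tables are
// advanced to the new end indices (writeBackTimeTable).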
5891 
5892 // We need to pass the neuron id (nid) and the grpId only in case we want to
5893 // ramp up/down the weights. In that case we need to set the weight of each synapse
5894 // depending on its nid (its position with respect to the others). -- KDC
5895 float SNN::generateWeight(int connProp, float initWt, float maxWt, int nid, int grpId) {
5896  float actWts;
5898  //bool setRandomWeights = GET_INITWTS_RANDOM(connProp);
5899  //bool setRampDownWeights = GET_INITWTS_RAMPDOWN(connProp);
5900  //bool setRampUpWeights = GET_INITWTS_RAMPUP(connProp);
5901 
5902  //if (setRandomWeights)
5903  // actWts = initWt * drand48();
5904  //else if (setRampUpWeights)
5905  // actWts = (initWt + ((nid - groupConfigs[0][grpId].StartN) * (maxWt - initWt) / groupConfigs[0][grpId].SizeN));
5906  //else if (setRampDownWeights)
5907  // actWts = (maxWt - ((nid - groupConfigs[0][grpId].StartN) * (maxWt - initWt) / groupConfigs[0][grpId].SizeN));
5908  //else
5909  actWts = initWt;
5910 
5911  return actWts;
5912 }
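// Worked example (illustrative only; the ramp modes above are currently commented
// out, so generateWeight() simply returns initWt): with initWt = 0.1, maxWt = 1.0,
// a group of SizeN = 10 starting at StartN = 0, the commented ramp-up scheme would
// give neuron nid = 5 a weight of 0.1 + 5*(1.0 - 0.1)/10 = 0.55.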
5913 
5914 // checks whether a connection ID contains plastic synapses O(#connections)
5915 bool SNN::isConnectionPlastic(short int connId) {
5916  assert(connId != ALL);
5917  assert(connId < numConnections);
5918 
5919  return GET_FIXED_PLASTIC(connectConfigMap[connId].connProp);
5920 }
5921 
5922 // FIXME: distinguish the function call at CONFIG_STATE and SETUP_STATE, where groupConfigs[0][] might not be available
5923 // or groupConfigMap is not sync with groupConfigs[0][]
5924 // returns whether group has homeostasis enabled
5925 bool SNN::isGroupWithHomeostasis(int grpId) {
5926  assert(grpId>=0 && grpId<getNumGroups());
5927  return (groupConfigMap[grpId].homeoConfig.WithHomeostasis);
5928 }
5929 
5930 // performs various verification checkups before building the network
5931 void SNN::verifyNetwork() {
5932  // make sure number of neuron parameters have been accumulated correctly
5933  // NOTE: this used to be updateParameters
5934  //verifyNumNeurons();
5935 
5936  // make sure compartment config is valid
5937  verifyCompartments();
5938 
5939  // make sure STDP post-group has some incoming plastic connections
5940  verifySTDP();
5941 
5942  // make sure every group with homeostasis also has STDP
5943  verifyHomeostasis();
5944 
5945  // make sure the max delay is within bound
5946  assert(glbNetworkConfig.maxDelay <= MAX_SYN_DELAY);
5947 
5948  // make sure there is sufficient buffer
5949  //if ((networkConfigs[0].maxSpikesD1 + networkConfigs[0].maxSpikesD2) < (numNExcReg + numNInhReg + numNPois) * UNKNOWN_NEURON_MAX_FIRING_RATE) {
5950  // KERNEL_ERROR("Insufficient amount of buffer allocated...");
5951  // exitSimulation(1);
5952  //}
5953 
5954  //make sure the number of pre- and post-connection does not exceed the limitation
5955  //if (maxNumPostSynGrp > MAX_NUM_POST_SYN) {
5956  // for (int g = 0; g < numGroups; g++) {
5957  // if (groupConfigMap[g].numPostSynapses>MAX_NUM_POST_SYN)
5958  // KERNEL_ERROR("Grp: %s(%d) has too many output synapses (%d), max %d.",groupInfo[g].Name.c_str(),g,
5959  // groupConfigMap[g].numPostSynapses,MAX_NUM_POST_SYN);
5960  // }
5961  // assert(maxNumPostSynGrp <= MAX_NUM_POST_SYN);
5962  //}
5963 
5964  //if (maxNumPreSynGrp > MAX_NUM_PRE_SYN) {
5965  // for (int g = 0; g < numGroups; g++) {
5966  // if (groupConfigMap[g].numPreSynapses>MAX_NUM_PRE_SYN)
5967  // KERNEL_ERROR("Grp: %s(%d) has too many input synapses (%d), max %d.",groupInfo[g].Name.c_str(),g,
5968  // groupConfigMap[g].numPreSynapses,MAX_NUM_PRE_SYN);
5969  // }
5970  // assert(maxNumPreSynGrp <= MAX_NUM_PRE_SYN);
5971  //}
5972 
5973  // make sure maxDelay == 1 if STP is enabled
5974  // \FIXME: need to figure out STP buffer for delays > 1
5975  if (sim_with_stp && glbNetworkConfig.maxDelay > 1) {
5976  KERNEL_ERROR("STP with delays > 1 ms is currently not supported.");
5978  }
5979 
5980  if (glbNetworkConfig.maxDelay > MAX_SYN_DELAY) {
5981  KERNEL_ERROR("You are using a synaptic delay (%d) greater than MAX_SYN_DELAY defined in config.h", glbNetworkConfig.maxDelay);
5983  }
5984 }
5985 
5986 void SNN::verifyCompartments() {
5987  for (std::map<int, compConnectConfig>::iterator it = compConnectConfigMap.begin(); it != compConnectConfigMap.end(); it++)
5988  {
5989  int grpLower = it->second.grpSrc;
5990  int grpUpper = it->second.grpDest;
5991 
5992  // make sure groups are compartmentally enabled
5993  if (!groupConfigMap[grpLower].withCompartments) {
5994  KERNEL_ERROR("Group %s(%d) is not compartmentally enabled, cannot be part of a compartmental connection.",
5995  groupConfigMap[grpLower].grpName.c_str(), grpLower);
5997  }
5998  if (!groupConfigMap[grpUpper].withCompartments) {
5999  KERNEL_ERROR("Group %s(%d) is not compartmentally enabled, cannot be part of a compartmental connection.",
6000  groupConfigMap[grpUpper].grpName.c_str(), grpUpper);
6002  }
6003  }
6004 }
6005 
6006 // checks whether STDP is set on a plastic connection
6007 void SNN::verifySTDP() {
6008  for (int gGrpId=0; gGrpId<getNumGroups(); gGrpId++) {
6009  if (groupConfigMap[gGrpId].WithSTDP) {
6010  // for each post-group, check if any of the incoming connections are plastic
6011  bool isAnyPlastic = false;
6012  for (std::map<int, ConnectConfig>::iterator it = connectConfigMap.begin(); it != connectConfigMap.end(); it++) {
6013 //LN2021 Adhoc Fix -> Kexin ?
6014 //
6016  //if (it->second.stdpConfig.WithSTDP) {
6017  // if (GET_FIXED_PLASTIC(it->second.connProp)) {
6018  // break;
6019  // } else {
6020  // KERNEL_ERROR("If STDP on connection %d is set, connection must be plastic.",it->second.connId);
6021  // exitSimulation(1);
6022  // }
6023  //}
6024  isAnyPlastic |= GET_FIXED_PLASTIC(it->second.connProp);
6025  if (isAnyPlastic) {
6026  // at least one plastic connection found: break out of the loop
6027  break;
6028  }
6029  }
6030  if (!isAnyPlastic) {
6031  KERNEL_ERROR("If STDP on group %d (%s) is set, group must have some incoming plastic connections.",
6032  gGrpId, groupConfigMap[gGrpId].grpName.c_str());
6034  }
6035  }
6036  }
6037 }
6038 
6039 // checks whether every group with Homeostasis also has STDP
6040 void SNN::verifyHomeostasis() {
6041  for (int gGrpId=0; gGrpId<getNumGroups(); gGrpId++) {
6042  if (groupConfigMap[gGrpId].homeoConfig.WithHomeostasis) {
6043  KERNEL_INFO("group %d STDP %d", gGrpId, groupConfigMap[gGrpId].WithSTDP);
6044  if (!groupConfigMap[gGrpId].WithSTDP) {
6045  KERNEL_ERROR("If homeostasis is enabled on group %d (%s), then STDP must be enabled, too.",
6046  gGrpId, groupConfigMap[gGrpId].grpName.c_str());
6048  }
6049  }
6050  }
6051 }
6052 
6054 //void SNN::verifyNumNeurons() {
6055 // int nExcPois = 0;
6056 // int nInhPois = 0;
6057 // int nExcReg = 0;
6058 // int nInhReg = 0;
6059 //
6060 // // scan all the groups and find the required information
6061 // // about the group (numN, numPostSynapses, numPreSynapses and others).
6062 // for(int g=0; g<numGroups; g++) {
6063 // if (groupConfigMap[g].Type==UNKNOWN_NEURON) {
6064 // KERNEL_ERROR("Unknown group for %d (%s)", g, groupInfo[g].Name.c_str());
6065 // exitSimulation(1);
6066 // }
6067 //
6068 // if (IS_INHIBITORY_TYPE(groupConfigMap[g].Type) && !(groupConfigMap[g].Type & POISSON_NEURON))
6069 // nInhReg += groupConfigMap[g].SizeN;
6070 // else if (IS_EXCITATORY_TYPE(groupConfigMap[g].Type) && !(groupConfigMap[g].Type & POISSON_NEURON))
6071 // nExcReg += groupConfigMap[g].SizeN;
6072 // else if (IS_EXCITATORY_TYPE(groupConfigMap[g].Type) && (groupConfigMap[g].Type & POISSON_NEURON))
6073 // nExcPois += groupConfigMap[g].SizeN;
6074 // else if (IS_INHIBITORY_TYPE(groupConfigMap[g].Type) && (groupConfigMap[g].Type & POISSON_NEURON))
6075 // nInhPois += groupConfigMap[g].SizeN;
6076 // }
6077 //
6078 // // check the newly gathered information with class members
6079 // if (numN != nExcReg+nInhReg+nExcPois+nInhPois) {
6080 // KERNEL_ERROR("nExcReg+nInhReg+nExcPois+nInhPois=%d does not add up to numN=%d",
6081 // nExcReg+nInhReg+nExcPois+nInhPois, numN);
6082 // exitSimulation(1);
6083 // }
6084 // if (numNReg != nExcReg+nInhReg) {
6085 // KERNEL_ERROR("nExcReg+nInhReg=%d does not add up to numNReg=%d", nExcReg+nInhReg, numNReg);
6086 // exitSimulation(1);
6087 // }
6088 // if (numNPois != nExcPois+nInhPois) {
6089 // KERNEL_ERROR("nExcPois+nInhPois=%d does not add up to numNPois=%d", nExcPois+nInhPois, numNPois);
6090 // exitSimulation(1);
6091 // }
6092 //
6093 // //printf("numN=%d == %d\n",numN,nExcReg+nInhReg+nExcPois+nInhPois);
6094 // //printf("numNReg=%d == %d\n",numNReg, nExcReg+nInhReg);
6095 // //printf("numNPois=%d == %d\n",numNPois, nExcPois+nInhPois);
6096 //
6097 // assert(numN <= 1000000);
6098 // assert((numN > 0) && (numN == numNExcReg + numNInhReg + numNPois));
6099 //}
6100 
6101 // \FIXME: not sure where this should go... maybe create some helper file?
6102 bool SNN::isPoint3DinRF(const RadiusRF& radius, const Point3D& pre, const Point3D& post) {
6103  // Note: RadiusRF rad is assumed to be the fanning in to the post neuron. So if the radius is 10 pixels, it means
6104  // that if you look at the post neuron, it will receive input from neurons that code for locations no more than
6105  // 10 pixels away. (The opposite is called a response/stimulus field.)
6106 
6107  double rfDist = getRFDist3D(radius, pre, post);
6108  return (rfDist >= 0.0 && rfDist <= 1.0);
6109 }
6110 
6111 double SNN::getRFDist3D(const RadiusRF& radius, const Point3D& pre, const Point3D& post) {
6112  // Note: RadiusRF rad is assumed to be the fanning in to the post neuron. So if the radius is 10 pixels, it means
6113  // that if you look at the post neuron, it will receive input from neurons that code for locations no more than
6114  // 10 pixels away.
6115 
6116  // ready output argument
6117  // SNN::isPoint3DinRF() will return true (connected) if rfDist is in [0.0, 1.0]
6118  double rfDist = -1.0;
6119 
6120  // pre and post are connected in a generic 3D ellipsoid RF if x^2/a^2 + y^2/b^2 + z^2/c^2 <= 1.0, where
6121  // x = pre.x-post.x, y = pre.y-post.y, z = pre.z-post.z
6122  // x < 0 means: connect if y and z satisfy some constraints, but ignore x
6123  // x == 0 means: connect if y and z satisfy some constraints, and enforce pre.x == post.x
6124  if (radius.radX==0 && pre.x!=post.x || radius.radY==0 && pre.y!=post.y || radius.radZ==0 && pre.z!=post.z) {
6125  rfDist = -1.0;
6126  } else {
6127  // 3D ellipsoid: x^2/a^2 + y^2/b^2 + z^2/c^2 <= 1.0
6128  double xTerm = (radius.radX<=0) ? 0.0 : pow(pre.x-post.x,2)/pow(radius.radX,2);
6129  double yTerm = (radius.radY<=0) ? 0.0 : pow(pre.y-post.y,2)/pow(radius.radY,2);
6130  double zTerm = (radius.radZ<=0) ? 0.0 : pow(pre.z-post.z,2)/pow(radius.radZ,2);
6131  rfDist = xTerm + yTerm + zTerm;
6132  }
6133 
6134  return rfDist;
6135 }
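// Worked example (added for clarity, hypothetical coordinates): with RadiusRF(10, 10, 0),
// a presynaptic neuron at (3, 4, 0) and a postsynaptic neuron at (0, 0, 0) give
// rfDist = 3^2/10^2 + 4^2/10^2 + 0 = 0.25, so isPoint3DinRF() returns true; moving the
// presynaptic neuron to (8, 8, 0) gives rfDist = 1.28 > 1.0, so the pair falls outside
// the receptive field.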
6136 
6137 void SNN::partitionSNN() {
6138  int numAssignedNeurons[MAX_NET_PER_SNN] = {0};
6139 
6140 #ifndef __NO_CUDA__
6141  // get number of available GPU card(s) in the present machine
6142  numAvailableGPUs = configGPUDevice();
6143 #endif
6144 
6145  for (std::map<int, GroupConfigMD>::iterator grpIt = groupConfigMDMap.begin(); grpIt != groupConfigMDMap.end(); grpIt++) {
6146  // assign a group to the GPU specified by users
6147  int gGrpId = grpIt->second.gGrpId;
6148  int netId = groupConfigMap[gGrpId].preferredNetId;
6149  if (netId != ANY) {
6150  assert(netId > ANY && netId < MAX_NET_PER_SNN);
6151  grpIt->second.netId = netId;
6152  numAssignedNeurons[netId] += groupConfigMap[gGrpId].numN;
6153  groupPartitionLists[netId].push_back(grpIt->second); // Copy by value, create a copy
6154  } else { // netId == ANY
6155  // TODO: add a callback function that allows users to partition the network themselves
6156  // FIXME: make sure GPU(s) are available first
6157  // this pass separates groups into local networks and assigns each group a netId
6158  if (preferredSimMode_ == CPU_MODE) {
6159  grpIt->second.netId = CPU_RUNTIME_BASE; // CPU 0
6160  numAssignedNeurons[CPU_RUNTIME_BASE] += groupConfigMap[gGrpId].numN;
6161  groupPartitionLists[CPU_RUNTIME_BASE].push_back(grpIt->second); // Copy by value, create a copy
6162  } else if (preferredSimMode_ == GPU_MODE) {
6163  grpIt->second.netId = GPU_RUNTIME_BASE; // GPU 0
6164  numAssignedNeurons[GPU_RUNTIME_BASE] += groupConfigMap[gGrpId].numN;
6165  groupPartitionLists[GPU_RUNTIME_BASE].push_back(grpIt->second); // Copy by value, create a copy
6166  } else if (preferredSimMode_ == HYBRID_MODE) {
6167  // TODO: implement partition algorithm, use naive partition for now (allocate to CPU 0)
6168  grpIt->second.netId = CPU_RUNTIME_BASE; // CPU 0
6169  numAssignedNeurons[CPU_RUNTIME_BASE] += groupConfigMap[gGrpId].numN;
6170  groupPartitionLists[CPU_RUNTIME_BASE].push_back(grpIt->second); // Copy by value, create a copy
6171  } else {
6172  KERNEL_ERROR("Unknown simulation mode");
6173  exitSimulation(-1);
6174  }
6175  }
6176 
6177  if (grpIt->second.netId == -1) { // the group was not assigned to any computing backend
6178  KERNEL_ERROR("Can't assign the group [%d] to any partition", grpIt->second.gGrpId);
6179  exitSimulation(-1);
6180  }
6181  }
6182 
6183  // this pass finds local connections (i.e., connection configs that connect local groups)
6184  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
6185  if (!groupPartitionLists[netId].empty()) {
6186  for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
6187  if (groupConfigMDMap[connIt->second.grpSrc].netId == netId && groupConfigMDMap[connIt->second.grpDest].netId == netId) {
6188  localConnectLists[netId].push_back(connectConfigMap[connIt->second.connId]); // Copy by value
6189  }
6190  }
6191 
6192  //printf("The size of compConnectConfigMap is: %i\n", compConnectConfigMap.size());
6193  for (std::map<int, compConnectConfig>::iterator connIt = compConnectConfigMap.begin(); connIt != compConnectConfigMap.end(); connIt++) {
6194  if (groupConfigMDMap[connIt->second.grpSrc].netId == netId && groupConfigMDMap[connIt->second.grpDest].netId == netId) {
6195  localCompConnectLists[netId].push_back(compConnectConfigMap[connIt->second.connId]); // Copy by value
6196  }
6197  }
6198  }
6199  }
6200 
6201  // this pass finds external groups and external connections
6202  spikeRoutingTable.clear();
6203  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
6204  if (!groupPartitionLists[netId].empty()) {
6205  for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
6206  int srcNetId = groupConfigMDMap[connIt->second.grpSrc].netId;
6207  int destNetId = groupConfigMDMap[connIt->second.grpDest].netId;
6208  if (srcNetId == netId && destNetId != netId) {
6209  // search the source group in groupPartitionLists and mark it as having external connections
6210  GroupConfigMD targetGroup;
6211  std::list<GroupConfigMD>::iterator srcGrpIt, destGrpIt;
6212 
6213  targetGroup.gGrpId = connIt->second.grpSrc;
6214  srcGrpIt = find(groupPartitionLists[srcNetId].begin(), groupPartitionLists[srcNetId].end(), targetGroup);
6215  assert(srcGrpIt != groupPartitionLists[srcNetId].end());
6216  srcGrpIt->hasExternalConnect = true;
6217 
6218  // FIXME: fails to write the external group if the only external link across GPUs is unidirectional (GPU0 -> GPU1, no GPU1 -> GPU0)
6219  targetGroup.gGrpId = connIt->second.grpDest;
6220  destGrpIt = find(groupPartitionLists[srcNetId].begin(), groupPartitionLists[srcNetId].end(), targetGroup);
6221  if (destGrpIt == groupPartitionLists[srcNetId].end()) { // the "external" dest group has not yet been copied to the "local" group partition list
6222  numAssignedNeurons[srcNetId] += groupConfigMap[connIt->second.grpDest].numN;
6223  groupPartitionLists[srcNetId].push_back(groupConfigMDMap[connIt->second.grpDest]);
6224  }
6225 
6226  targetGroup.gGrpId = connIt->second.grpSrc;
6227  srcGrpIt = find(groupPartitionLists[destNetId].begin(), groupPartitionLists[destNetId].end(), targetGroup);
6228  if (srcGrpIt == groupPartitionLists[destNetId].end()) {
6229  numAssignedNeurons[destNetId] += groupConfigMap[connIt->second.grpSrc].numN;
6230  groupPartitionLists[destNetId].push_back(groupConfigMDMap[connIt->second.grpSrc]);
6231  }
6232 
6233  externalConnectLists[srcNetId].push_back(connectConfigMap[connIt->second.connId]); // Copy by value
6234 
6235  // build the spike routing table along the way
6236  //printf("%d,%d -> %d,%d\n", srcNetId, connIt->second.grpSrc, destNetId, connIt->second.grpDest);
6237  RoutingTableEntry rte(srcNetId, destNetId);
6238  spikeRoutingTable.push_back(rte);
6239  }
6240  }
6241  }
6242  }
6243 
6244  spikeRoutingTable.unique();
6245 
6246  // assign local neuron ids and local group ids for each local network in the following order
6247  // IMPORTANT: NEURON ORGANIZATION/ARRANGEMENT MAP
6248  // <--- Excitatory --> | <-------- Inhibitory REGION ----------> | <-- Excitatory --> | <-- External -->
6249  // Excitatory-Regular | Inhibitory-Regular | Inhibitory-Poisson | Excitatory-Poisson | External Neurons
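 // Example (added for clarity, hypothetical group sizes): a local network with an
 // excitatory-regular group of 80 neurons, an inhibitory-regular group of 20, and an
 // excitatory-Poisson input group of 10 is laid out as local ids [0..79] exc-regular,
 // [80..99] inh-regular, [100..109] exc-Poisson, with any external groups appended
 // after local id 109.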
6250  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
6251  if (!groupPartitionLists[netId].empty()) {
6252  int availableNeuronId = 0;
6253  int localGroupId = 0;
6254  for (int order = 0; order < 5; order++) {
6255  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
6256  unsigned int type = groupConfigMap[grpIt->gGrpId].type;
6257  if (IS_EXCITATORY_TYPE(type) && (type & POISSON_NEURON) && order == 3 && grpIt->netId == netId) {
6258  availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
6259  localGroupId++;
6260  } else if (IS_INHIBITORY_TYPE(type) && (type & POISSON_NEURON) && order == 2 && grpIt->netId == netId) {
6261  availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
6262  localGroupId++;
6263  } else if (IS_EXCITATORY_TYPE(type) && !(type & POISSON_NEURON) && order == 0 && grpIt->netId == netId) {
6264  availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
6265  localGroupId++;
6266  } else if (IS_INHIBITORY_TYPE(type) && !(type & POISSON_NEURON) && order == 1 && grpIt->netId == netId) {
6267  availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
6268  localGroupId++;
6269  } else if (order == 4 && grpIt->netId != netId) {
6270  availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
6271  localGroupId++;
6272  }
6273  }
6274  }
6275  assert(availableNeuronId == numAssignedNeurons[netId]);
6276  assert(localGroupId == groupPartitionLists[netId].size());
6277  }
6278  }
6279 
6280 
6281  // generate connections among groups according to group and connect configs
6282  // update ConnectConfig::numberOfConnections
6283  // update GroupConfig::numPostSynapses, GroupConfig::numPreSynapses
6284  if (loadSimFID == NULL) {
6285  connectNetwork();
6286  } else {
6287  KERNEL_INFO("Load Simulation");
6288  loadSimulation_internal(false); // true or false doesn't matter here
6289  }
6290 
6291  collectGlobalNetworkConfigP();
6292 
6293  // print group and connection overview
6294  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
6295  if (!groupPartitionLists[netId].empty()) {
6296  KERNEL_INFO("\n+ Local Network (%d)", netId);
6297  KERNEL_INFO("|-+ Group List:");
6298  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++)
6299  printGroupInfo(netId, grpIt);
6300  }
6301 
6302  if (!localConnectLists[netId].empty() || !externalConnectLists[netId].empty()) {
6303  KERNEL_INFO("|-+ Connection List:");
6304  for (std::list<ConnectConfig>::iterator connIt = localConnectLists[netId].begin(); connIt != localConnectLists[netId].end(); connIt++)
6305  printConnectionInfo(netId, connIt);
6306 
6307  for (std::list<ConnectConfig>::iterator connIt = externalConnectLists[netId].begin(); connIt != externalConnectLists[netId].end(); connIt++)
6308  printConnectionInfo(netId, connIt);
6309  }
6310  }
6311 
6312  // print spike routing table
6313  printSikeRoutingInfo();
6314 
6315  snnState = PARTITIONED_SNN;
6316 }
6317 
6318 #ifdef LN_SETUP_NETWORK_MT
6319 // featFastSetup LN20201108
6320 void SNN::partitionSNNMT() {
6321  int numAssignedNeurons[MAX_NET_PER_SNN] = { 0 };
6322 
6323  // get number of available GPU card(s) in the present machine
6324  numAvailableGPUs = configGPUDevice();
6325 
6326  for (std::map<int, GroupConfigMD>::iterator grpIt = groupConfigMDMap.begin(); grpIt != groupConfigMDMap.end(); grpIt++) {
6327  // assign a group to the GPU specified by users
6328  int gGrpId = grpIt->second.gGrpId;
6329  int netId = groupConfigMap[gGrpId].preferredNetId;
6330  if (netId != ANY) {
6331  assert(netId > ANY && netId < MAX_NET_PER_SNN);
6332  grpIt->second.netId = netId;
6333  numAssignedNeurons[netId] += groupConfigMap[gGrpId].numN;
6334  groupPartitionLists[netId].push_back(grpIt->second); // Copy by value, create a copy
6335  }
6336  else { // netId == ANY
6337  // TODO: add a callback function that allows users to partition the network themselves
6338  // FIXME: make sure GPU(s) are available first
6339  // this pass separates groups into local networks and assigns each group a netId
6340  if (preferredSimMode_ == CPU_MODE) {
6341  grpIt->second.netId = CPU_RUNTIME_BASE; // CPU 0
6342  numAssignedNeurons[CPU_RUNTIME_BASE] += groupConfigMap[gGrpId].numN;
6343  groupPartitionLists[CPU_RUNTIME_BASE].push_back(grpIt->second); // Copy by value, create a copy
6344  }
6345  else if (preferredSimMode_ == GPU_MODE) {
6346  grpIt->second.netId = GPU_RUNTIME_BASE; // GPU 0
6347  numAssignedNeurons[GPU_RUNTIME_BASE] += groupConfigMap[gGrpId].numN;
6348  groupPartitionLists[GPU_RUNTIME_BASE].push_back(grpIt->second); // Copy by value, create a copy
6349  }
6350  else if (preferredSimMode_ == HYBRID_MODE) {
6351  // TODO: implement partition algorithm, use naive partition for now (allocate to CPU 0)
6352  grpIt->second.netId = CPU_RUNTIME_BASE; // CPU 0
6353  numAssignedNeurons[CPU_RUNTIME_BASE] += groupConfigMap[gGrpId].numN;
6354  groupPartitionLists[CPU_RUNTIME_BASE].push_back(grpIt->second); // Copy by value, create a copy
6355  }
6356  else {
6357  KERNEL_ERROR("Unknown simulation mode");
6358  exitSimulation(-1);
6359  }
6360  }
6361 
6362  if (grpIt->second.netId == -1) { // the group was not assigned to any computing backend
6363  KERNEL_ERROR("Can't assign the group [%d] to any partition", grpIt->second.gGrpId);
6364  exitSimulation(-1);
6365  }
6366  }
6367 
6368  // this pass finds local connections (i.e., connection configs that connect local groups)
6369  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
6370  if (!groupPartitionLists[netId].empty()) {
6371  for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
6372  if (groupConfigMDMap[connIt->second.grpSrc].netId == netId && groupConfigMDMap[connIt->second.grpDest].netId == netId) {
6373  localConnectLists[netId].push_back(connectConfigMap[connIt->second.connId]); // Copy by value
6374  }
6375  }
6376 
6377  //printf("The size of compConnectConfigMap is: %i\n", compConnectConfigMap.size());
6378  for (std::map<int, compConnectConfig>::iterator connIt = compConnectConfigMap.begin(); connIt != compConnectConfigMap.end(); connIt++) {
6379  if (groupConfigMDMap[connIt->second.grpSrc].netId == netId && groupConfigMDMap[connIt->second.grpDest].netId == netId) {
6380  localCompConnectLists[netId].push_back(compConnectConfigMap[connIt->second.connId]); // Copy by value
6381  }
6382  }
6383  }
6384  }
6385 
6386  // this pass finds external groups and external connections
6387  spikeRoutingTable.clear();
6388  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
6389  if (!groupPartitionLists[netId].empty()) {
6390  for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
6391  int srcNetId = groupConfigMDMap[connIt->second.grpSrc].netId;
6392  int destNetId = groupConfigMDMap[connIt->second.grpDest].netId;
6393  if (srcNetId == netId && destNetId != netId) {
6394  // search the source group in groupPartitionLists and mark it as having external connections
6395  GroupConfigMD targetGroup;
6396  std::list<GroupConfigMD>::iterator srcGrpIt, destGrpIt;
6397 
6398  targetGroup.gGrpId = connIt->second.grpSrc;
6399  srcGrpIt = find(groupPartitionLists[srcNetId].begin(), groupPartitionLists[srcNetId].end(), targetGroup);
6400  assert(srcGrpIt != groupPartitionLists[srcNetId].end());
6401  srcGrpIt->hasExternalConnect = true;
6402 
6403  // FIXME: fails to register the external group if the only external link across GPUs is unidirectional (GPU0 -> GPU1, no GPU1 -> GPU0)
6404  targetGroup.gGrpId = connIt->second.grpDest;
6405  destGrpIt = find(groupPartitionLists[srcNetId].begin(), groupPartitionLists[srcNetId].end(), targetGroup);
6406  if (destGrpIt == groupPartitionLists[srcNetId].end()) { // the "external" dest group has not yet been copied to the "local" group partition list
6407  numAssignedNeurons[srcNetId] += groupConfigMap[connIt->second.grpDest].numN;
6408  groupPartitionLists[srcNetId].push_back(groupConfigMDMap[connIt->second.grpDest]);
6409  }
6410 
6411  targetGroup.gGrpId = connIt->second.grpSrc;
6412  srcGrpIt = find(groupPartitionLists[destNetId].begin(), groupPartitionLists[destNetId].end(), targetGroup);
6413  if (srcGrpIt == groupPartitionLists[destNetId].end()) {
6414  numAssignedNeurons[destNetId] += groupConfigMap[connIt->second.grpSrc].numN;
6415  groupPartitionLists[destNetId].push_back(groupConfigMDMap[connIt->second.grpSrc]);
6416  }
6417 
6418  externalConnectLists[srcNetId].push_back(connectConfigMap[connIt->second.connId]); // Copy by value
6419 
6420  // build the spike routing table along the way
6421  //printf("%d,%d -> %d,%d\n", srcNetId, connIt->second.grpSrc, destNetId, connIt->second.grpDest);
6422  RoutingTableEntry rte(srcNetId, destNetId);
6423  spikeRoutingTable.push_back(rte);
6424  }
6425  }
6426  }
6427  }
6428 
6429  spikeRoutingTable.unique();
6430 
6431  // assign local neuron ids and local group ids for each local network in the following order
6432  // IMPORTANT: NEURON ORGANIZATION/ARRANGEMENT MAP
6433  // <--- Excitatory --> | <-------- Inhibitory REGION ----------> | <-- Excitatory --> | <-- External -->
6434  // Excitatory-Regular | Inhibitory-Regular | Inhibitory-Poisson | Excitatory-Poisson | External Neurons
6435  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
6436  if (!groupPartitionLists[netId].empty()) {
6437  int availableNeuronId = 0;
6438  int localGroupId = 0;
6439  for (int order = 0; order < 5; order++) {
6440  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
6441  unsigned int type = groupConfigMap[grpIt->gGrpId].type;
6442  if (IS_EXCITATORY_TYPE(type) && (type & POISSON_NEURON) && order == 3 && grpIt->netId == netId) {
6443  availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
6444  localGroupId++;
6445  }
6446  else if (IS_INHIBITORY_TYPE(type) && (type & POISSON_NEURON) && order == 2 && grpIt->netId == netId) {
6447  availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
6448  localGroupId++;
6449  }
6450  else if (IS_EXCITATORY_TYPE(type) && !(type & POISSON_NEURON) && order == 0 && grpIt->netId == netId) {
6451  availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
6452  localGroupId++;
6453  }
6454  else if (IS_INHIBITORY_TYPE(type) && !(type & POISSON_NEURON) && order == 1 && grpIt->netId == netId) {
6455  availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
6456  localGroupId++;
6457  }
6458  else if (order == 4 && grpIt->netId != netId) {
6459  availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
6460  localGroupId++;
6461  }
6462  }
6463  }
6464  assert(availableNeuronId == numAssignedNeurons[netId]);
6465  assert(localGroupId == groupPartitionLists[netId].size());
6466  }
6467  }
6468 
6469 
6470  // generate connections among groups according to group and connect configs
6471  // update ConnectConfig::numberOfConnections
6472  // update GroupConfig::numPostSynapses, GroupConfig::numPreSynapses
6473  if (loadSimFID == NULL) {
6474  connectNetworkMT();
6475  }
6476  else {
6477  KERNEL_INFO("Load Simulation");
6478  loadSimulation_internal(false); // true or false doesn't matter here
6479  }
6480 
6481  collectGlobalNetworkConfigP();
6482 
6483  // print group and connection overview
6484  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
6485  if (!groupPartitionLists[netId].empty()) {
6486  KERNEL_INFO("\n+ Local Network (%d)", netId);
6487  KERNEL_INFO("|-+ Group List:");
6488  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++)
6489  printGroupInfo(netId, grpIt);
6490  }
6491 
6492  if (!localConnectLists[netId].empty() || !externalConnectLists[netId].empty()) {
6493  KERNEL_INFO("|-+ Connection List:");
6494  for (std::list<ConnectConfig>::iterator connIt = localConnectLists[netId].begin(); connIt != localConnectLists[netId].end(); connIt++)
6495  printConnectionInfo(netId, connIt);
6496 
6497  for (std::list<ConnectConfig>::iterator connIt = externalConnectLists[netId].begin(); connIt != externalConnectLists[netId].end(); connIt++)
6498  printConnectionInfo(netId, connIt);
6499  }
6500  }
6501 
6502  // print spike routing table
6503  printSikeRoutingInfo();
6504 
6505  snnState = PARTITIONED_SNN;
6506 }
6507 #endif
6508 
6509 int SNN::loadSimulation_internal(bool onlyPlastic) {
6510  // TSC: so that we can restore the file position later...
6511  // MB: not sure why though...
6512  long file_position = ftell(loadSimFID);
6513 
6514  int tmpInt;
6515  float tmpFloat;
6516 
6517  bool readErr = false; // keep track of reading errors
6518  size_t result;
6519 
6520 
6521  // ------- read header ----------------
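  // Header layout, as read below: int file signature, float version, float simulation time,
  // float execution time, int number of neurons, int number of groups
  // (pre-/post-synapse counts are no longer stored here; see the skipped block below).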
6522 
6523  fseek(loadSimFID, 0, SEEK_SET);
6524 
6525  // read file signature
6526  result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
6527  readErr |= (result!=1);
6528  if (tmpInt != 294338571) {
6529  KERNEL_ERROR("loadSimulation: Unknown file signature. This does not seem to be a "
6530  "simulation file created with CARLsim::saveSimulation.");
6532  }
6533 
6534  // read file version number
6535  result = fread(&tmpFloat, sizeof(float), 1, loadSimFID);
6536  readErr |= (result!=1);
6537  if (tmpFloat > 0.3f) {
6538  KERNEL_ERROR("loadSimulation: Unsupported version number (%f)",tmpFloat);
6540  }
6541 
6542  // read simulation time
6543  result = fread(&tmpFloat, sizeof(float), 1, loadSimFID);
6544  readErr |= (result!=1);
6545 
6546  // read execution time
6547  result = fread(&tmpFloat, sizeof(float), 1, loadSimFID);
6548  readErr |= (result!=1);
6549 
6550  // read number of neurons
6551  result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
6552  readErr |= (result!=1);
6553  if (tmpInt != glbNetworkConfig.numN) {
6554  KERNEL_ERROR("loadSimulation: Number of neurons in file (%d) and simulation (%d) don't match.",
6555  tmpInt, glbNetworkConfig.numN);
6557  }
6558 
6559  // skip saving and reading of pre-/post-synapse counts since CARLsim5, as they are now netId-based
6560  // read number of pre-synapses
6561  // result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
6562  // readErr |= (result!=1);
6563  // if (numPreSynNet != tmpInt) {
6564  // KERNEL_ERROR("loadSimulation: numPreSynNet in file (%d) and simulation (%d) don't match.",
6565  // tmpInt, numPreSynNet);
6566  // exitSimulation(-1);
6567  // }
6568 
6570  //result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
6571  //readErr |= (result!=1);
6572  //if (numPostSynNet != tmpInt) {
6573  // KERNEL_ERROR("loadSimulation: numPostSynNet in file (%d) and simulation (%d) don't match.",
6574  // tmpInt, numPostSynNet);
6575  // exitSimulation(-1);
6576  //}
6577 
6578  // read number of groups
6579  result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
6580  readErr |= (result!=1);
6581  if (tmpInt != numGroups) {
6582  KERNEL_ERROR("loadSimulation: Number of groups in file (%d) and simulation (%d) don't match.",
6583  tmpInt, numGroups);
6585  }
6586 
6587  // report a reading error instead of proceeding
6588  if (readErr) {
6589  fprintf(stderr,"loadSimulation: Error while reading file header"); // \todo Jinwei: why fprintf instead of KERNEL_ERROR
6591  }
6592 
6593 
6594  // ------- read group information ----------------
6595  for (int g=0; g<numGroups; g++) {
6596  // read StartN
6597  result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
6598  readErr |= (result!=1);
6599  if (tmpInt != groupConfigMDMap[g].gStartN) {
6600  KERNEL_ERROR("loadSimulation: StartN in file (%d) and grpInfo (%d) for group %d don't match.",
6601  tmpInt, groupConfigMDMap[g].gStartN, g);
6603  }
6604 
6605  // read EndN
6606  result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
6607  readErr |= (result!=1);
6608  if (tmpInt != groupConfigMDMap[g].gEndN) {
6609  KERNEL_ERROR("loadSimulation: EndN in file (%d) and grpInfo (%d) for group %d don't match.",
6610  tmpInt, groupConfigMDMap[g].gEndN, g);
6612  }
6613 
6614  // read SizeX
6615  result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
6616  readErr |= (result!=1);
6617  if (tmpInt != groupConfigMap[g].grid.numX) {
6618  KERNEL_ERROR("loadSimulation: numX in file (%d) and grpInfo (%d) for group %d don't match.",
6619  tmpInt, groupConfigMap[g].grid.numX, g);
6621  }
6622 
6623 
6624  // read SizeY
6625  result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
6626  readErr |= (result!=1);
6627  if (tmpInt != groupConfigMap[g].grid.numY) {
6628  KERNEL_ERROR("loadSimulation: numY in file (%d) and grpInfo (%d) for group %d don't match.",
6629  tmpInt, groupConfigMap[g].grid.numY, g);
6631  }
6632 
6633 
6634  // read SizeZ
6635  result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
6636  readErr |= (result!=1);
6637  if (tmpInt != groupConfigMap[g].grid.numZ) {
6638  KERNEL_ERROR("loadSimulation: numZ in file (%d) and grpInfo (%d) for group %d don't match.",
6639  tmpInt, groupConfigMap[g].grid.numZ, g);
6641  }
6642 
6643 
6644  // read group name
6645  char name[100];
6646  result = fread(name, sizeof(char), 100, loadSimFID);
6647  readErr |= (result!=100);
6648  if (strcmp(name,groupConfigMap[g].grpName.c_str()) != 0) {
6649  KERNEL_ERROR("loadSimulation: Group names in file (%s) and grpInfo (%s) don't match.", name,
6650  groupConfigMap[g].grpName.c_str());
6652  }
6653  }
6654 
6655  if (readErr) {
6656  KERNEL_ERROR("loadSimulation: Error while reading group info");
6658  }
6659  // // read weight
6660  // result = fread(&weight, sizeof(float), 1, loadSimFID);
6661  // readErr |= (result!=1);
6662 
6663  // short int gIDpre = managerRuntimeData.grpIds[nIDpre];
6664  // if (IS_INHIBITORY_TYPE(groupConfigs[0][gIDpre].Type) && (weight>0)
6665  // || !IS_INHIBITORY_TYPE(groupConfigs[0][gIDpre].Type) && (weight<0)) {
6666  // KERNEL_ERROR("loadSimulation: Sign of weight value (%s) does not match neuron type (%s)",
6667  // ((weight>=0.0f)?"plus":"minus"),
6668  // (IS_INHIBITORY_TYPE(groupConfigs[0][gIDpre].Type)?"inhibitory":"excitatory"));
6669  // exitSimulation(-1);
6670  // }
6671 
6672  // // read max weight
6673  // result = fread(&maxWeight, sizeof(float), 1, loadSimFID);
6674  // readErr |= (result!=1);
6675  // if (IS_INHIBITORY_TYPE(groupConfigs[0][gIDpre].Type) && (maxWeight>=0)
6676  // || !IS_INHIBITORY_TYPE(groupConfigs[0][gIDpre].Type) && (maxWeight<=0)) {
6677  // KERNEL_ERROR("loadSimulation: Sign of maxWeight value (%s) does not match neuron type (%s)",
6678  // ((maxWeight>=0.0f)?"plus":"minus"),
6679  // (IS_INHIBITORY_TYPE(groupConfigs[0][gIDpre].Type)?"inhibitory":"excitatory"));
6680  // exitSimulation(-1);
6681  // }
6682 
6683  // ------- read synapse information ----------------
6684  int net_count = 0;
6685  result = fread(&net_count, sizeof(int), 1, loadSimFID);
6686  readErr |= (result!=1);
6687 
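  // Synapse section layout, as read below: for each of the net_count networks an int
  // synapse_count, followed by synapse_count records of
  // (gGrpIdPre, gGrpIdPost, grpNIdPre, grpNIdPost, connId, weight, maxWeight, delay).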
6688  for (int i = 0; i < net_count; i++) {
6689  int synapse_count = 0;
6690  result = fread(&synapse_count, sizeof(int), 1, loadSimFID);
6691  for (int j = 0; j < synapse_count; j++) {
6692  int gGrpIdPre;
6693  int gGrpIdPost;
6694  int grpNIdPre;
6695  int grpNIdPost;
6696  int connId;
6697  float weight;
6698  float maxWeight;
6699  int delay;
6700 
6701  // read gGrpIdPre
6702  result = fread(&gGrpIdPre, sizeof(int), 1, loadSimFID);
6703  readErr |= (result!=1); // FIX warning C4552: '!=': result of expression not used
6704 
6705  // read gGrpIdPost
6706  result = fread(&gGrpIdPost, sizeof(int), 1, loadSimFID);
6707  readErr |= (result!=1); // FIX warning C4552: '!=': result of expression not used
6708 
6709  // read grpNIdPre
6710  result = fread(&grpNIdPre, sizeof(int), 1, loadSimFID);
6711  readErr |= (result!=1); // FIX warning C4552: '!=': result of expression not used
6712 
6713  // read grpNIdPost
6714  result = fread(&grpNIdPost, sizeof(int), 1, loadSimFID);
6715  readErr |= (result!=1); // FIX warning C4552: '!=': result of expression not used
6716 
6717  // read connId
6718  result = fread(&connId, sizeof(int), 1, loadSimFID);
6719  readErr |= (result!=1); // FIX warning C4552: '!=': result of expression not used
6720 
6721  // read weight
6722  result = fread(&weight, sizeof(float), 1, loadSimFID);
6723  readErr |= (result!=1); // FIX warning C4552: '!=': result of expression not used
6724 
6725  // read maxWeight
6726  result = fread(&maxWeight, sizeof(float), 1, loadSimFID);
6727  readErr |= (result!=1); // FIX warning C4552: '!=': result of expression not used
6728 
6729  // read delay
6730  result = fread(&delay, sizeof(int), 1, loadSimFID);
6731  readErr |= (result!=1); // FIX warning C4552: '!=': result of expression not used
6732 
6733 
6734  // check connection
6735  if (connectConfigMap[connId].grpSrc != gGrpIdPre) {
6736  KERNEL_ERROR("loadSimulation: source group in file (%d) and in simulation (%d) for connection %d don't match.",
6737  gGrpIdPre , connectConfigMap[connId].grpSrc, connId);
6739  }
6740 
6741  if (connectConfigMap[connId].grpDest != gGrpIdPost) {
6742  KERNEL_ERROR("loadSimulation: dest group in file (%d) and in simulation (%d) for connection %d don't match.",
6743  gGrpIdPost , connectConfigMap[connId].grpDest, connId);
6745  }
6746 
6747  // connect synapse
6748  // find netid for two groups
6749  int netIdPre = groupConfigMDMap[gGrpIdPre].netId;
6750  int netIdPost = groupConfigMDMap[gGrpIdPost].netId;
6751  bool isExternal = (netIdPre != netIdPost);
6752 
6753  // find global neuron id for two neurons
6754  int globalNIdPre = groupConfigMDMap[gGrpIdPre].gStartN + grpNIdPre;
6755  int globalNIdPost = groupConfigMDMap[gGrpIdPost].gStartN + grpNIdPost;
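  // e.g. (hypothetical values): if the pre-group starts at global neuron id 100 and
  // grpNIdPre is 5, then globalNIdPre = 105; the post side is offset the same way.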
6756 
6757  bool connected = false;
6758  if (!isExternal) {
6759  for (std::list<ConnectConfig>::iterator connIt = localConnectLists[netIdPre].begin(); connIt != localConnectLists[netIdPre].end() && (!connected); connIt++) {
6760  if (connIt->connId == connId) {
6761  // connect two neurons
6762  connectNeurons(netIdPre, gGrpIdPre, gGrpIdPost, globalNIdPre, globalNIdPost, connId, weight, maxWeight, delay, -1);
6763  connected = true;
6764  // update connection information
6765  connIt->numberOfConnections++;
6766  std::list<GroupConfigMD>::iterator grpIt;
6767 
6768  // fix me maybe: numPostSynapses and numPreSynapses could also be loaded from the saved information directly to save time
6769  // the current implementation is the safer one
6770  GroupConfigMD targetGrp;
6771 
6772  targetGrp.gGrpId = gGrpIdPre;
6773  grpIt = std::find(groupPartitionLists[netIdPre].begin(), groupPartitionLists[netIdPre].end(), targetGrp);
6774  assert(grpIt != groupPartitionLists[netIdPre].end());
6775  grpIt->numPostSynapses += 1;
6776 
6777  targetGrp.gGrpId = gGrpIdPost;
6778  grpIt = std::find(groupPartitionLists[netIdPre].begin(), groupPartitionLists[netIdPre].end(), targetGrp);
6779  assert(grpIt != groupPartitionLists[netIdPre].end()); // check against the list that was searched
6780  grpIt->numPreSynapses += 1;
6781  }
6782  }
6783  } else {
6784  for (std::list<ConnectConfig>::iterator connIt = externalConnectLists[netIdPre].begin(); connIt != externalConnectLists[netIdPre].end() && (!connected); connIt++) {
6785  if (connIt->connId == connId) {
6786  // connect two neurons
6787  connectNeurons(netIdPre, gGrpIdPre, gGrpIdPost, globalNIdPre, globalNIdPost, connId, weight, maxWeight, delay, netIdPost);
6788  connected = true;
6789  // update connection information
6790  connIt->numberOfConnections++;
6791 
6792  // fix me maybe: numPostSynapses and numPreSynapses could also be loaded from the saved information directly to save time
6793  // the current implementation is the safer one
6794  GroupConfigMD targetGrp;
6795  std::list<GroupConfigMD>::iterator grpIt;
6796 
6797  targetGrp.gGrpId = gGrpIdPre;
6798  grpIt = std::find(groupPartitionLists[netIdPre].begin(), groupPartitionLists[netIdPre].end(), targetGrp);
6799  assert(grpIt != groupPartitionLists[netIdPre].end());
6800  grpIt->numPostSynapses += 1;
6801 
6802  targetGrp.gGrpId = gGrpIdPost;
6803  grpIt = std::find(groupPartitionLists[netIdPre].begin(), groupPartitionLists[netIdPre].end(), targetGrp);
6804  assert(grpIt != groupPartitionLists[netIdPre].end()); // check against the list that was searched
6805  grpIt->numPreSynapses += 1;
6806 
6807  // update group information in another network
6808  targetGrp.gGrpId = gGrpIdPre;
6809  grpIt = std::find(groupPartitionLists[netIdPost].begin(), groupPartitionLists[netIdPost].end(), targetGrp);
6810  assert(grpIt != groupPartitionLists[netIdPost].end());
6811  grpIt->numPostSynapses += 1;
6812 
6813  targetGrp.gGrpId = gGrpIdPost;
6814  grpIt = std::find(groupPartitionLists[netIdPost].begin(), groupPartitionLists[netIdPost].end(), targetGrp);
6815  assert(grpIt != groupPartitionLists[netIdPost].end());
6816  grpIt->numPreSynapses += 1;
6817  }
6818  }
6819  }
6820  }
6821  }
6822 
6823  fseek(loadSimFID,file_position,SEEK_SET);
6824 
6825  return 0;
6826 }
6827 
6828 void SNN::generateRuntimeSNN() {
6829  // 1. generate configurations for the simulation
6830  // generate (copy) group configs from groupPartitionLists[]
6831  generateRuntimeGroupConfigs();
6832 
6833  // generate (copy) connection configs from localConnectLists[] and externalConnectLists[]
6834  generateRuntimeConnectConfigs();
6835 
6836  // generate local network configs and acquire the maximum size of runtime data
6837  generateRuntimeNetworkConfigs();
6838 
6839  // 2. allocate space of runtime data used by the manager
6840  // - allocate firingTableD1, firingTableD2, timeTableD1, timeTableD2
6841  // - reset firingTableD1, firingTableD2, timeTableD1, timeTableD2
6842  allocateManagerSpikeTables();
6843  // - allocate voltage, recovery, Izh_a, Izh_b, Izh_c, Izh_d, current, extCurrent, gAMPA, gNMDA, gGABAa, gGABAb
6844  // lastSpikeTime, nSpikeCnt, stpu, stpx, Npre, Npre_plastic, Npost, cumulativePost, cumulativePre,
6845  // postSynapticIds, postDelayInfo, wt, wtChange, synSpikeTime, maxSynWt, preSynapticIds, grpIds, connIdsPreIdx,
6846  // grpDA, grp5HT, grpACh, grpNE, grpDABuffer, grp5HTBuffer, grpAChBuffer, grpNEBuffer, mulSynFast, mulSynSlow
6847  // - reset all above
6848  allocateManagerRuntimeData();
6849 
6850  // 3. initialize manager runtime data according to partitions (i.e., local networks)
6851  // 4a. allocate appropriate memory space (e.g., main memory (CPU) or device memory (GPU)).
6852  // 4b. load (copy) them to appropriate memory space for execution
6853  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
6854  if (!groupPartitionLists[netId].empty()) {
6855  KERNEL_INFO("");
6856  if (netId < CPU_RUNTIME_BASE) {
6857  KERNEL_INFO("***************** Initializing GPU %d Runtime *************************", netId);
6858  } else {
6859  KERNEL_INFO("***************** Initializing CPU %d Runtime *************************", (netId - CPU_RUNTIME_BASE));
6860  }
6861  // build the runtime data according to local network, group, and connection configurations
6862 
6863  // generate runtime data for each group
6864  for(int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
6865  // local poisson neurons
6866  if (groupConfigs[netId][lGrpId].netId == netId && (groupConfigs[netId][lGrpId].Type & POISSON_NEURON)) {
6867  // - init lastSpikeTime
6868  // - reset avgFiring, stpu, stpx
6869  // - init stpx
6870  generatePoissonGroupRuntime(netId, lGrpId);
6871  }
6872  // local regular neurons
6873  if (groupConfigs[netId][lGrpId].netId == netId && !(groupConfigs[netId][lGrpId].Type & POISSON_NEURON)) {
6874  // - init grpDA, grp5HT, grpACh, grpNE
6875  // - init Izh_a, Izh_b, Izh_c, Izh_d, voltage, recovery, stpu, stpx
6876  // - init baseFiring, avgFiring
6877  // - init lastSpikeTime
6878  generateGroupRuntime(netId, lGrpId);
6879  }
6880  }
6881 
6882  // - init grpIds
6883  for (int lNId = 0; lNId < networkConfigs[netId].numNAssigned; lNId++) {
6884  managerRuntimeData.grpIds[lNId] = -1;
6885  for(int lGrpId = 0; lGrpId < networkConfigs[netId].numGroupsAssigned; lGrpId++) {
6886  if (lNId >= groupConfigs[netId][lGrpId].lStartN && lNId <= groupConfigs[netId][lGrpId].lEndN) {
6887  managerRuntimeData.grpIds[lNId] = (short int)lGrpId;
6888  break;
6889  }
6890  }
6891  assert(managerRuntimeData.grpIds[lNId] != -1);
6892  }
6893 
6894  // - init mulSynFast, mulSynSlow
6895  // - init Npre, Npre_plastic, Npost, cumulativePre, cumulativePost, preSynapticIds, postSynapticIds, postDelayInfo
6896  // - init wt, maxSynWt
6897  generateConnectionRuntime(netId);
6898 
6899  generateCompConnectionRuntime(netId);
6900 
6901  // - reset current
6902  resetCurrent(netId);
6903  // - reset conductance
6904  resetConductances(netId);
6905 
6906  // - reset wtChange
6907  // - init synSpikeTime
6908  resetSynapse(netId, false);
6909 
6910  allocateSNN(netId);
6911  }
6912  }
6913 
6914  // count allocated CPU/GPU runtime
6915  numGPUs = 0; numCores = 0;
6916  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
6917  if (netId < CPU_RUNTIME_BASE && runtimeData[netId].allocated)
6918  numGPUs++;
6919  if (netId >= CPU_RUNTIME_BASE && runtimeData[netId].allocated)
6920  numCores++;
6921  }
6922 
6923  // 5. declare the spiking neural network executable
6924  snnState = EXECUTABLE_SNN;
6925 }
6926 
6927 void SNN::resetConductances(int netId) {
6928 #ifdef LN_I_CALC_TYPES
6929  // always allocate memory for explicit rise/decay as it may be partitioned by groups
6930  if (networkConfigs[netId].sim_with_conductances) {
6931  memset(managerRuntimeData.gAMPA, 0, sizeof(float) * networkConfigs[netId].numNReg);
6932  memset(managerRuntimeData.gNMDA_r, 0, sizeof(float) * networkConfigs[netId].numNReg);
6933  memset(managerRuntimeData.gNMDA_d, 0, sizeof(float) * networkConfigs[netId].numNReg);
6934  memset(managerRuntimeData.gNMDA, 0, sizeof(float) * networkConfigs[netId].numNReg);
6935  memset(managerRuntimeData.gGABAa, 0, sizeof(float) * networkConfigs[netId].numNReg);
6936  memset(managerRuntimeData.gGABAb_r, 0, sizeof(float) * networkConfigs[netId].numNReg);
6937  memset(managerRuntimeData.gGABAb_d, 0, sizeof(float) * networkConfigs[netId].numNReg);
6938  memset(managerRuntimeData.gGABAb, 0, sizeof(float) * networkConfigs[netId].numNReg);
6939  }
6940 #else
6941  if (networkConfigs[netId].sim_with_conductances) {
6942  memset(managerRuntimeData.gAMPA, 0, sizeof(float) * networkConfigs[netId].numNReg);
6943  if (networkConfigs[netId].sim_with_NMDA_rise) {
6944  memset(managerRuntimeData.gNMDA_r, 0, sizeof(float) * networkConfigs[netId].numNReg);
6945  memset(managerRuntimeData.gNMDA_d, 0, sizeof(float) * networkConfigs[netId].numNReg);
6946  } else {
6947  memset(managerRuntimeData.gNMDA, 0, sizeof(float) * networkConfigs[netId].numNReg);
6948  }
6949  memset(managerRuntimeData.gGABAa, 0, sizeof(float) * networkConfigs[netId].numNReg);
6950  if (networkConfigs[netId].sim_with_GABAb_rise) {
6951  memset(managerRuntimeData.gGABAb_r, 0, sizeof(float) * networkConfigs[netId].numNReg);
6952  memset(managerRuntimeData.gGABAb_d, 0, sizeof(float) * networkConfigs[netId].numNReg);
6953  } else {
6954  memset(managerRuntimeData.gGABAb, 0, sizeof(float) * networkConfigs[netId].numNReg);
6955  }
6956  }
6957 #endif
6958 }
6959 
6960 void SNN::resetCurrent(int netId) {
6961  assert(managerRuntimeData.current != NULL);
6962  memset(managerRuntimeData.current, 0, sizeof(float) * networkConfigs[netId].numNReg);
6963 }
6964 
6965 // FIXME: unused function
6966 void SNN::resetFiringInformation() {
6967  // Reset firing tables and time tables to default values..
6968 
6969  // reset various times...
6970  simTimeMs = 0;
6971  simTimeSec = 0;
6972  simTime = 0;
6973 
6974  // reset the propagation buffer.
6975  resetPropogationBuffer();
6976  // reset Timing Table..
6977  resetTimeTable();
6978 }
6979 
6980 void SNN::resetTiming() {
6981  prevExecutionTime = cumExecutionTime;
6982  executionTime = 0.0f;
6983 }
6984 
6985 void SNN::resetNeuromodulator(int netId, int lGrpId) {
6986  managerRuntimeData.grpDA[lGrpId] = groupConfigs[netId][lGrpId].baseDP;
6987  managerRuntimeData.grp5HT[lGrpId] = groupConfigs[netId][lGrpId].base5HT;
6988  managerRuntimeData.grpACh[lGrpId] = groupConfigs[netId][lGrpId].baseACh;
6989  managerRuntimeData.grpNE[lGrpId] = groupConfigs[netId][lGrpId].baseNE;
6990 }
6991 
6995 void SNN::resetNeuron(int netId, int lGrpId, int lNId) {
6996  int gGrpId = groupConfigs[netId][lGrpId].gGrpId; // get global group id
6997  assert(lNId < networkConfigs[netId].numNReg);
6998 
6999  if (groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a == -1 && groupConfigMap[gGrpId].isLIF == 0) {
7000  KERNEL_ERROR("setNeuronParameters must be called for group %s (G:%d,L:%d)",groupConfigMap[gGrpId].grpName.c_str(), gGrpId, lGrpId);
7002  }
7003 
7004  if (groupConfigMap[gGrpId].neuralDynamicsConfig.lif_tau_m == -1 && groupConfigMap[gGrpId].isLIF == 1) {
7005  KERNEL_ERROR("setNeuronParametersLIF must be called for group %s (G:%d,L:%d)",groupConfigMap[gGrpId].grpName.c_str(), gGrpId, lGrpId);
7007  }
7008 
7009  managerRuntimeData.Izh_a[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a_sd * (float)drand48();
7010  managerRuntimeData.Izh_b[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b_sd * (float)drand48();
7011  managerRuntimeData.Izh_c[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c_sd * (float)drand48();
7012  managerRuntimeData.Izh_d[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d_sd * (float)drand48();
7013  managerRuntimeData.Izh_C[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_C + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_C_sd * (float)drand48();
7014  managerRuntimeData.Izh_k[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_k + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_k_sd * (float)drand48();
7015  managerRuntimeData.Izh_vr[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vr + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vr_sd * (float)drand48();
7016  managerRuntimeData.Izh_vt[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vt + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vt_sd * (float)drand48();
7017  managerRuntimeData.Izh_vpeak[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vpeak + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vpeak_sd * (float)drand48();
7018  managerRuntimeData.lif_tau_m[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.lif_tau_m;
7019  managerRuntimeData.lif_tau_ref[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.lif_tau_ref;
7020  managerRuntimeData.lif_tau_ref_c[lNId] = 0;
7021  managerRuntimeData.lif_vTh[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.lif_vTh;
7022  managerRuntimeData.lif_vReset[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.lif_vReset;
7023 
7024  // calculate gain and bias for the lif neuron
7025  if (groupConfigs[netId][lGrpId].isLIF){
7026  // gain and bias of the LIF neuron are calculated based on membrane resistance
7027  float rmRange = (float)(groupConfigMap[gGrpId].neuralDynamicsConfig.lif_maxRmem - groupConfigMap[gGrpId].neuralDynamicsConfig.lif_minRmem);
7028  float minRmem = (float)groupConfigMap[gGrpId].neuralDynamicsConfig.lif_minRmem;
7029  managerRuntimeData.lif_bias[lNId] = 0.0f;
7030  managerRuntimeData.lif_gain[lNId] = minRmem + rmRange * (float)drand48();
7031  }
7032 
7033  managerRuntimeData.nextVoltage[lNId] = managerRuntimeData.voltage[lNId] = groupConfigs[netId][lGrpId].isLIF ? managerRuntimeData.lif_vReset[lNId] : (groupConfigs[netId][lGrpId].withParamModel_9 ? managerRuntimeData.Izh_vr[lNId] : managerRuntimeData.Izh_c[lNId]);
7034  managerRuntimeData.recovery[lNId] = groupConfigs[netId][lGrpId].withParamModel_9 ? 0.0f : managerRuntimeData.Izh_b[lNId] * managerRuntimeData.voltage[lNId];
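  // per the ternaries above: LIF neurons start at lif_vReset, 9-parameter Izhikevich
  // neurons at Izh_vr with u = 0, and the 4-parameter model at Izh_c with u = Izh_b * v.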
7035 
7036  if (groupConfigs[netId][lGrpId].WithHomeostasis) {
7037  // set the baseFiring with some standard deviation.
7038  if (drand48() > 0.5) {
7039  managerRuntimeData.baseFiring[lNId] = groupConfigMap[gGrpId].homeoConfig.baseFiring + groupConfigMap[gGrpId].homeoConfig.baseFiringSD * -log(drand48());
7040  } else {
7041  managerRuntimeData.baseFiring[lNId] = groupConfigMap[gGrpId].homeoConfig.baseFiring - groupConfigMap[gGrpId].homeoConfig.baseFiringSD * -log(drand48());
7042  if(managerRuntimeData.baseFiring[lNId] < 0.1f) managerRuntimeData.baseFiring[lNId] = 0.1f;
7043  }
7044 
7045  if (groupConfigMap[gGrpId].homeoConfig.baseFiring != 0.0f) {
7046  managerRuntimeData.avgFiring[lNId] = managerRuntimeData.baseFiring[lNId];
7047  } else {
7048  managerRuntimeData.baseFiring[lNId] = 0.0f;
7049  managerRuntimeData.avgFiring[lNId] = 0.0f;
7050  }
7051  }
7052 
7053  managerRuntimeData.lastSpikeTime[lNId] = MAX_SIMULATION_TIME;
7054 
7055  if(groupConfigs[netId][lGrpId].WithSTP) {
7056  for (int j = 0; j < networkConfigs[netId].maxDelay + 1; j++) { // is of size maxDelay_+1
7057  int index = STP_BUF_POS(lNId, j, networkConfigs[netId].maxDelay);
7058  managerRuntimeData.stpu[index] = 0.0f;
7059  managerRuntimeData.stpx[index] = 1.0f;
7060  }
7061  }
7062 }
7063 
7064 void SNN::resetMonitors(bool deallocate) {
7065  // order is important! monitor objects might point to SNN or CARLsim,
7066  // need to deallocate them first
7067 
7068 
7069  // -------------- DEALLOCATE MONITOR OBJECTS ---------------------- //
7070 
7071  // delete all SpikeMonitor objects
7072  // don't kill SpikeMonitorCore objects, they will get killed automatically
7073  for (int i=0; i<numSpikeMonitor; i++) {
7074  if (spikeMonList[i]!=NULL && deallocate) delete spikeMonList[i];
7075  spikeMonList[i]=NULL;
7076  }
7077 
7078  // delete all NeuronMonitor objects
7079  // don't kill NeuronMonitorCore objects, they will get killed automatically
7080  for (int i = 0; i<numNeuronMonitor; i++) {
7081  if (neuronMonList[i] != NULL && deallocate) delete neuronMonList[i];
7082  neuronMonList[i] = NULL;
7083  }
7084 
7085  // delete all GroupMonitor objects
7086  // don't kill GroupMonitorCore objects, they will get killed automatically
7087  for (int i=0; i<numGroupMonitor; i++) {
7088  if (groupMonList[i]!=NULL && deallocate) delete groupMonList[i];
7089  groupMonList[i]=NULL;
7090  }
7091 
7092  // delete all ConnectionMonitor objects
7093  // don't kill ConnectionMonitorCore objects, they will get killed automatically
7094  for (int i=0; i<numConnectionMonitor; i++) {
7095  if (connMonList[i]!=NULL && deallocate) delete connMonList[i];
7096  connMonList[i]=NULL;
7097  }
7098 }
7099 
7100 void SNN::resetGroupConfigs(bool deallocate) {
7101  // clear all existing group configurations
7102  if (deallocate) groupConfigMap.clear();
7103 }
7104 
7105 void SNN::resetConnectionConfigs(bool deallocate) {
7106  // clear all existing connection configurations
7107  if (deallocate) connectConfigMap.clear();
7108 }
7109 
7110 void SNN::deleteManagerRuntimeData() {
7111  if (spikeBuf!=NULL) delete spikeBuf;
7112  if (managerRuntimeData.spikeGenBits!=NULL) delete[] managerRuntimeData.spikeGenBits;
7113  spikeBuf=NULL; managerRuntimeData.spikeGenBits=NULL;
7114 
7115  // LN20201103 featSpikes poisson
7116  if (managerRuntimeData.randNum!=NULL) {
7117  delete[] managerRuntimeData.randNum;
7118  managerRuntimeData.randNum = NULL;
7119  }
7120  if (managerRuntimeData.poissonFireRate!=NULL) {
7121  delete[] managerRuntimeData.poissonFireRate;
7122  managerRuntimeData.poissonFireRate = NULL;
7123  }
7124 
7125  // clear data (i.e., concentration of neuromodulator) of groups
7126  if (managerRuntimeData.grpDA != NULL) delete [] managerRuntimeData.grpDA;
7127  if (managerRuntimeData.grp5HT != NULL) delete [] managerRuntimeData.grp5HT;
7128  if (managerRuntimeData.grpACh != NULL) delete [] managerRuntimeData.grpACh;
7129  if (managerRuntimeData.grpNE != NULL) delete [] managerRuntimeData.grpNE;
7130  managerRuntimeData.grpDA = NULL;
7131  managerRuntimeData.grp5HT = NULL;
7132  managerRuntimeData.grpACh = NULL;
7133  managerRuntimeData.grpNE = NULL;
7134 
7135  // clear assistive data buffer for group monitor
7136  if (managerRuntimeData.grpDABuffer != NULL) delete [] managerRuntimeData.grpDABuffer;
7137  if (managerRuntimeData.grp5HTBuffer != NULL) delete [] managerRuntimeData.grp5HTBuffer;
7138  if (managerRuntimeData.grpAChBuffer != NULL) delete [] managerRuntimeData.grpAChBuffer;
7139  if (managerRuntimeData.grpNEBuffer != NULL) delete [] managerRuntimeData.grpNEBuffer;
7140  managerRuntimeData.grpDABuffer = NULL; managerRuntimeData.grp5HTBuffer = NULL;
7141  managerRuntimeData.grpAChBuffer = NULL; managerRuntimeData.grpNEBuffer = NULL;
7142 
7143  // -------------- DEALLOCATE CORE OBJECTS ---------------------- //
7144 
7145  if (managerRuntimeData.voltage!=NULL) delete[] managerRuntimeData.voltage;
7146  if (managerRuntimeData.nextVoltage != NULL) delete[] managerRuntimeData.nextVoltage;
7147  if (managerRuntimeData.recovery!=NULL) delete[] managerRuntimeData.recovery;
7148  if (managerRuntimeData.current!=NULL) delete[] managerRuntimeData.current;
7149  if (managerRuntimeData.extCurrent!=NULL) delete[] managerRuntimeData.extCurrent;
7150  if (managerRuntimeData.totalCurrent != NULL) delete[] managerRuntimeData.totalCurrent;
7151  if (managerRuntimeData.curSpike != NULL) delete[] managerRuntimeData.curSpike;
7152  if (managerRuntimeData.nVBuffer != NULL) delete[] managerRuntimeData.nVBuffer;
7153  if (managerRuntimeData.nUBuffer != NULL) delete[] managerRuntimeData.nUBuffer;
7154  if (managerRuntimeData.nIBuffer != NULL) delete[] managerRuntimeData.nIBuffer;
7155  managerRuntimeData.voltage=NULL; managerRuntimeData.recovery=NULL; managerRuntimeData.current=NULL; managerRuntimeData.extCurrent=NULL;
7156  managerRuntimeData.nextVoltage = NULL; managerRuntimeData.totalCurrent = NULL; managerRuntimeData.curSpike = NULL;
7157  managerRuntimeData.nVBuffer = NULL; managerRuntimeData.nUBuffer = NULL; managerRuntimeData.nIBuffer = NULL;
7158 
7159  if (managerRuntimeData.Izh_a!=NULL) delete[] managerRuntimeData.Izh_a;
7160  if (managerRuntimeData.Izh_b!=NULL) delete[] managerRuntimeData.Izh_b;
7161  if (managerRuntimeData.Izh_c!=NULL) delete[] managerRuntimeData.Izh_c;
7162  if (managerRuntimeData.Izh_d!=NULL) delete[] managerRuntimeData.Izh_d;
7163  if (managerRuntimeData.Izh_C!=NULL) delete[] managerRuntimeData.Izh_C;
7164  if (managerRuntimeData.Izh_k!=NULL) delete[] managerRuntimeData.Izh_k;
7165  if (managerRuntimeData.Izh_vr!=NULL) delete[] managerRuntimeData.Izh_vr;
7166  if (managerRuntimeData.Izh_vt!=NULL) delete[] managerRuntimeData.Izh_vt;
7167  if (managerRuntimeData.Izh_vpeak!=NULL) delete[] managerRuntimeData.Izh_vpeak;
7168  managerRuntimeData.Izh_a=NULL; managerRuntimeData.Izh_b=NULL; managerRuntimeData.Izh_c=NULL; managerRuntimeData.Izh_d=NULL;
7169  managerRuntimeData.Izh_C = NULL; managerRuntimeData.Izh_k = NULL; managerRuntimeData.Izh_vr = NULL; managerRuntimeData.Izh_vt = NULL; managerRuntimeData.Izh_vpeak = NULL;
7170 
7171  if (managerRuntimeData.lif_tau_m!=NULL) delete[] managerRuntimeData.lif_tau_m;
7172  if (managerRuntimeData.lif_tau_ref!=NULL) delete[] managerRuntimeData.lif_tau_ref;
7173  if (managerRuntimeData.lif_tau_ref_c!=NULL) delete[] managerRuntimeData.lif_tau_ref_c;
7174  if (managerRuntimeData.lif_vTh!=NULL) delete[] managerRuntimeData.lif_vTh;
7175  if (managerRuntimeData.lif_vReset!=NULL) delete[] managerRuntimeData.lif_vReset;
7176  if (managerRuntimeData.lif_gain!=NULL) delete[] managerRuntimeData.lif_gain;
7177  if (managerRuntimeData.lif_bias!=NULL) delete[] managerRuntimeData.lif_bias;
7178  managerRuntimeData.lif_tau_m=NULL; managerRuntimeData.lif_tau_ref=NULL; managerRuntimeData.lif_vTh=NULL;
7179  managerRuntimeData.lif_vReset=NULL; managerRuntimeData.lif_gain=NULL; managerRuntimeData.lif_bias=NULL;
7180  managerRuntimeData.lif_tau_ref_c=NULL;
7181 
7182  if (managerRuntimeData.Npre!=NULL) delete[] managerRuntimeData.Npre;
7183  if (managerRuntimeData.Npre_plastic!=NULL) delete[] managerRuntimeData.Npre_plastic;
7184  if (managerRuntimeData.Npost!=NULL) delete[] managerRuntimeData.Npost;
7185  managerRuntimeData.Npre=NULL; managerRuntimeData.Npre_plastic=NULL; managerRuntimeData.Npost=NULL;
7186 
7187  if (managerRuntimeData.cumulativePre!=NULL) delete[] managerRuntimeData.cumulativePre;
7188  if (managerRuntimeData.cumulativePost!=NULL) delete[] managerRuntimeData.cumulativePost;
7189  managerRuntimeData.cumulativePre=NULL; managerRuntimeData.cumulativePost=NULL;
7190 
7191  if (managerRuntimeData.gAMPA!=NULL) delete[] managerRuntimeData.gAMPA;
7192  if (managerRuntimeData.gNMDA!=NULL) delete[] managerRuntimeData.gNMDA;
7193  if (managerRuntimeData.gNMDA_r!=NULL) delete[] managerRuntimeData.gNMDA_r;
7194  if (managerRuntimeData.gNMDA_d!=NULL) delete[] managerRuntimeData.gNMDA_d;
7195  if (managerRuntimeData.gGABAa!=NULL) delete[] managerRuntimeData.gGABAa;
7196  if (managerRuntimeData.gGABAb!=NULL) delete[] managerRuntimeData.gGABAb;
7197  if (managerRuntimeData.gGABAb_r!=NULL) delete[] managerRuntimeData.gGABAb_r;
7198  if (managerRuntimeData.gGABAb_d!=NULL) delete[] managerRuntimeData.gGABAb_d;
7199  managerRuntimeData.gAMPA=NULL; managerRuntimeData.gNMDA=NULL; managerRuntimeData.gNMDA_r=NULL; managerRuntimeData.gNMDA_d=NULL;
7200  managerRuntimeData.gGABAa=NULL; managerRuntimeData.gGABAb=NULL; managerRuntimeData.gGABAb_r=NULL; managerRuntimeData.gGABAb_d=NULL;
7201 
7202  if (managerRuntimeData.stpu!=NULL) delete[] managerRuntimeData.stpu;
7203  if (managerRuntimeData.stpx!=NULL) delete[] managerRuntimeData.stpx;
7204  managerRuntimeData.stpu=NULL; managerRuntimeData.stpx=NULL;
7205 
7206  if (managerRuntimeData.avgFiring!=NULL) delete[] managerRuntimeData.avgFiring;
7207  if (managerRuntimeData.baseFiring!=NULL) delete[] managerRuntimeData.baseFiring;
7208  managerRuntimeData.avgFiring=NULL; managerRuntimeData.baseFiring=NULL;
7209 
7210  if (managerRuntimeData.lastSpikeTime!=NULL) delete[] managerRuntimeData.lastSpikeTime;
7211  if (managerRuntimeData.synSpikeTime !=NULL) delete[] managerRuntimeData.synSpikeTime;
7212  if (managerRuntimeData.nSpikeCnt!=NULL) delete[] managerRuntimeData.nSpikeCnt;
7213  managerRuntimeData.lastSpikeTime=NULL; managerRuntimeData.synSpikeTime=NULL; managerRuntimeData.nSpikeCnt=NULL;
7214 
7215  if (managerRuntimeData.postDelayInfo!=NULL) delete[] managerRuntimeData.postDelayInfo;
7216  if (managerRuntimeData.preSynapticIds!=NULL) delete[] managerRuntimeData.preSynapticIds;
7217  if (managerRuntimeData.postSynapticIds!=NULL) delete[] managerRuntimeData.postSynapticIds;
7218  managerRuntimeData.postDelayInfo=NULL; managerRuntimeData.preSynapticIds=NULL; managerRuntimeData.postSynapticIds=NULL;
7219 
7220  if (managerRuntimeData.wt!=NULL) delete[] managerRuntimeData.wt;
7221  if (managerRuntimeData.maxSynWt!=NULL) delete[] managerRuntimeData.maxSynWt;
7222  if (managerRuntimeData.wtChange !=NULL) delete[] managerRuntimeData.wtChange;
7223  managerRuntimeData.wt=NULL; managerRuntimeData.maxSynWt=NULL; managerRuntimeData.wtChange=NULL;
7224 
7225  if (mulSynFast!=NULL) delete[] mulSynFast;
7226  if (mulSynSlow!=NULL) delete[] mulSynSlow;
7227  if (managerRuntimeData.connIdsPreIdx!=NULL) delete[] managerRuntimeData.connIdsPreIdx;
7228  mulSynFast=NULL; mulSynSlow=NULL; managerRuntimeData.connIdsPreIdx=NULL;
7229 
7230  if (managerRuntimeData.grpIds!=NULL) delete[] managerRuntimeData.grpIds;
7231  managerRuntimeData.grpIds=NULL;
7232 
7233  if (managerRuntimeData.timeTableD2 != NULL) delete [] managerRuntimeData.timeTableD2;
7234  if (managerRuntimeData.timeTableD1 != NULL) delete [] managerRuntimeData.timeTableD1;
7235  managerRuntimeData.timeTableD2 = NULL; managerRuntimeData.timeTableD1 = NULL;
7236 
7237  if (managerRuntimeData.firingTableD2!=NULL) delete[] managerRuntimeData.firingTableD2;
7238  if (managerRuntimeData.firingTableD1!=NULL) delete[] managerRuntimeData.firingTableD1;
7239  //if (managerRuntimeData.firingTableD2!=NULL) CUDA_CHECK_ERRORS(cudaFreeHost(managerRuntimeData.firingTableD2));
7240  //if (managerRuntimeData.firingTableD1!=NULL) CUDA_CHECK_ERRORS(cudaFreeHost(managerRuntimeData.firingTableD1));
7241  managerRuntimeData.firingTableD2 = NULL; managerRuntimeData.firingTableD1 = NULL;
7242 
7243 #ifdef LN_AXON_PLAST
7244  if (managerRuntimeData.firingTimesD2 != NULL) delete[] managerRuntimeData.firingTimesD2;
7245  managerRuntimeData.firingTimesD2 = NULL;
7246 #endif
7247 
7248  if (managerRuntimeData.extFiringTableD2!=NULL) delete[] managerRuntimeData.extFiringTableD2;
7249  if (managerRuntimeData.extFiringTableD1!=NULL) delete[] managerRuntimeData.extFiringTableD1;
7250  //if (managerRuntimeData.extFiringTableD2!=NULL) CUDA_CHECK_ERRORS(cudaFreeHost(managerRuntimeData.extFiringTableD2));
7251  //if (managerRuntimeData.extFiringTableD1!=NULL) CUDA_CHECK_ERRORS(cudaFreeHost(managerRuntimeData.extFiringTableD1));
7252  managerRuntimeData.extFiringTableD2 = NULL; managerRuntimeData.extFiringTableD1 = NULL;
7253 
7254  if (managerRuntimeData.extFiringTableEndIdxD1 != NULL) delete[] managerRuntimeData.extFiringTableEndIdxD1;
7255  if (managerRuntimeData.extFiringTableEndIdxD2 != NULL) delete[] managerRuntimeData.extFiringTableEndIdxD2;
7256  //if (managerRuntimeData.extFiringTableEndIdxD1 != NULL) CUDA_CHECK_ERRORS(cudaFreeHost(managerRuntimeData.extFiringTableEndIdxD1));
7257  //if (managerRuntimeData.extFiringTableEndIdxD2 != NULL) CUDA_CHECK_ERRORS(cudaFreeHost(managerRuntimeData.extFiringTableEndIdxD2));
7258  managerRuntimeData.extFiringTableEndIdxD1 = NULL; managerRuntimeData.extFiringTableEndIdxD2 = NULL;
7259 }
7260 
7264 void SNN::resetPoissonNeuron(int netId, int lGrpId, int lNId) {
7265  assert(lNId < networkConfigs[netId].numN);
7266  managerRuntimeData.lastSpikeTime[lNId] = MAX_SIMULATION_TIME;
7267  if (groupConfigs[netId][lGrpId].WithHomeostasis)
7268  managerRuntimeData.avgFiring[lNId] = 0.0f;
7269 
7270  if (groupConfigs[netId][lGrpId].WithSTP) {
7271  for (int j = 0; j < networkConfigs[netId].maxDelay + 1; j++) { // is of size maxDelay_+1
7272  int index = STP_BUF_POS(lNId, j, networkConfigs[netId].maxDelay);
7273  managerRuntimeData.stpu[index] = 0.0f;
7274  managerRuntimeData.stpx[index] = 1.0f;
7275  }
7276  }
7277 }
7278 
7279 void SNN::resetPropogationBuffer() {
7280  // FIXME: why 1023?
7281  spikeBuf->reset(0, 1023);
7282 }
7283 
7284 //Reset wt, wtChange, pre-firing time values to default values, rewritten to
7285 //integrate changes between JMN and MDR -- KDC
7286 //if changeWeights is false, we should keep the values of the weights as they currently
7287 //are but we should be able to change them to plastic or fixed synapses. -- KDC
7288  // FIXME: implement option of resetting weights
7289 void SNN::resetSynapse(int netId, bool changeWeights) {
7290  memset(managerRuntimeData.wtChange, 0, sizeof(float) * networkConfigs[netId].numPreSynNet); // reset the synaptic derivatives
7291 
7292  for (int syn = 0; syn < networkConfigs[netId].numPreSynNet; syn++)
7293  managerRuntimeData.synSpikeTime[syn] = MAX_SIMULATION_TIME; // reset the spike time of each synapse
7294 }
7295 
7296 void SNN::resetTimeTable() {
7297  memset(managerRuntimeData.timeTableD2, 0, sizeof(int) * (1000 + glbNetworkConfig.maxDelay + 1));
7298  memset(managerRuntimeData.timeTableD1, 0, sizeof(int) * (1000 + glbNetworkConfig.maxDelay + 1));
7299 }
7300 
7301 void SNN::resetFiringTable() {
7302  memset(managerRuntimeData.firingTableD2, 0, sizeof(int) * managerRTDSize.maxMaxSpikeD2);
7303  memset(managerRuntimeData.firingTableD1, 0, sizeof(int) * managerRTDSize.maxMaxSpikeD1);
7304 #ifdef LN_AXON_PLAST
7305  memset(managerRuntimeData.firingTimesD2, 0, sizeof(unsigned int) * managerRTDSize.maxMaxSpikeD2);
7306 #endif
7307  memset(managerRuntimeData.extFiringTableEndIdxD2, 0, sizeof(int) * managerRTDSize.maxNumGroups);
7308  memset(managerRuntimeData.extFiringTableEndIdxD1, 0, sizeof(int) * managerRTDSize.maxNumGroups);
7309  memset(managerRuntimeData.extFiringTableD2, 0, sizeof(int*) * managerRTDSize.maxNumGroups);
7310  memset(managerRuntimeData.extFiringTableD1, 0, sizeof(int*) * managerRTDSize.maxNumGroups);
7311 }
7312 
7313 void SNN::resetSpikeCnt(int gGrpId) {
7314  assert(gGrpId >= ALL);
7315 
7316  if (gGrpId == ALL) {
7317  #ifndef __NO_PTHREADS__ // POSIX
7318  pthread_t threads[numCores + 1]; // +1 slot so the array is non-empty even if numCores == 0; it works, though it is bad practice
7319  cpu_set_t cpus;
7320  ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
7321  int threadCount = 0;
7322  #endif
7323 
7324  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
7325  if (!groupPartitionLists[netId].empty()) {
7326  if (netId < CPU_RUNTIME_BASE) // GPU runtime
7327  resetSpikeCnt_GPU(netId, ALL);
7328  else{ // CPU runtime
7329  #ifdef __NO_PTHREADS__
7330  resetSpikeCnt_CPU(netId, ALL);
7331  #else // Linux or MAC
7332  pthread_attr_t attr;
7333  pthread_attr_init(&attr);
7334  CPU_ZERO(&cpus);
7335  CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
7336  pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);
7337 
7338  argsThreadRoutine[threadCount].snn_pointer = this;
7339  argsThreadRoutine[threadCount].netId = netId;
7340  argsThreadRoutine[threadCount].lGrpId = ALL;
7341  argsThreadRoutine[threadCount].startIdx = 0;
7342  argsThreadRoutine[threadCount].endIdx = 0;
7343  argsThreadRoutine[threadCount].GtoLOffset = 0;
7344 
7345  pthread_create(&threads[threadCount], &attr, &SNN::helperResetSpikeCnt_CPU, (void*)&argsThreadRoutine[threadCount]);
7346  pthread_attr_destroy(&attr);
7347  threadCount++;
7348  #endif
7349  }
7350  }
7351  }
7352 
7353  #ifndef __NO_PTHREADS__ // POSIX
7354  // join all the threads
7355  for (int i=0; i<threadCount; i++){
7356  pthread_join(threads[i], NULL);
7357  }
7358  #endif
7359  }
7360  else {
7361  int netId = groupConfigMDMap[gGrpId].netId;
7362  int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
7363 
7364  if (netId < CPU_RUNTIME_BASE) // GPU runtime
7365  resetSpikeCnt_GPU(netId, lGrpId);
7366  else // CPU runtime
7367  resetSpikeCnt_CPU(netId, lGrpId);
7368  }
7369 }
7370 
7371 
7373 inline SynInfo SNN::SET_CONN_ID(int nId, int sId, int grpId) {
7374  if (grpId > GROUP_ID_MASK) {
7375  KERNEL_ERROR("Error: Group Id (%d) exceeds maximum limit (%d)", grpId, GROUP_ID_MASK);
7377  }
7378 
7379  SynInfo synInfo;
7380  //p.postId = (((sid)<<CONN_SYN_NEURON_BITS)+((nid)&CONN_SYN_NEURON_MASK));
7381  //p.grpId = grpId;
7382  synInfo.gsId = ((grpId << NUM_SYNAPSE_BITS) | sId);
7383  synInfo.nId = nId;
7384 
7385  return synInfo;
7386 }
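// Packing example (hypothetical bit width): if NUM_SYNAPSE_BITS were 8, grpId = 3 and
// sId = 5 would pack to gsId = (3 << 8) | 5 = 773, while nId is stored unchanged.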
7387 
7388 
7389 void SNN::setGrpTimeSlice(int gGrpId, int timeSlice) {
7390  if (gGrpId == ALL) {
7391  for(int grpId = 0; grpId < numGroups; grpId++) {
7392  if (groupConfigMap[grpId].isSpikeGenerator)
7393  setGrpTimeSlice(grpId, timeSlice);
7394  }
7395  } else {
7396  assert((timeSlice > 0 ) && (timeSlice <= MAX_TIME_SLICE));
7397  // the group should be a Poisson spike generator group
7398  groupConfigMDMap[gGrpId].currTimeSlice = timeSlice;
7399  }
7400 }
7401 
7402 // method to set const member randSeed_
7403 int SNN::setRandSeed(int seed) {
7404  if (seed<0)
7405  return time(NULL);
7406  else if(seed==0)
7407  return 123;
7408  else
7409  return seed;
7410 }
7411 
7412 void SNN::fillSpikeGenBits(int netId) {
7413  SpikeBuffer::SpikeIterator spikeBufIter;
7414  SpikeBuffer::SpikeIterator spikeBufIterEnd = spikeBuf->back();
7415 
7416  // Convert spikes stored in spikeBuffer to spikeGenBits
7417  for (spikeBufIter = spikeBuf->front(); spikeBufIter != spikeBufIterEnd; ++spikeBufIter) {
7418  // get the global neuron id and group id for this particular spike
7419  int gGrpId = spikeBufIter->grpId;
7420 
7421  if (groupConfigMDMap[gGrpId].netId == netId) {
7422  int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
7423  int lNId = spikeBufIter->neurId /* gNId */ + groupConfigMDMap[gGrpId].GtoLOffset;
7424 
7425  // add spike to spikeGenBits
7426  assert(groupConfigMap[gGrpId].isSpikeGenerator == true);
7427 
7428  int nIdPos = (lNId - groupConfigs[netId][lGrpId].lStartN + groupConfigs[netId][lGrpId].Noffset);
7429  int nIdBitPos = nIdPos % 32;
7430  int nIdIndex = nIdPos / 32;
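  // e.g. (hypothetical): nIdPos = 37 lands in word nIdIndex = 1 at bit nIdBitPos = 5
  // of the spikeGenBits bit vector.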
7431 
7432  assert(nIdIndex < (networkConfigs[netId].numNSpikeGen / 32 + 1));
7433 
7434  managerRuntimeData.spikeGenBits[nIdIndex] |= (1 << nIdBitPos);
7435  }
7436  }
7437 }
7438 
7439 void SNN::startTiming() { prevExecutionTime = cumExecutionTime; }
7440 void SNN::stopTiming() {
7441  executionTime += (cumExecutionTime - prevExecutionTime);
7442  prevExecutionTime = cumExecutionTime;
7443 }
7444 
7445 // enters testing phase
7446 // in testing, no weight changes can be made, allowing you to evaluate learned weights, etc.
7447 void SNN::startTesting(bool shallUpdateWeights) {
7448  // because this can be called at any point in time, if we're off the 1-second grid, we want to make
7449  // sure to apply the accumulated weight changes to the weight matrix
7450  // but we don't reset the wt update interval counter
7451  if (shallUpdateWeights && !sim_in_testing) {
7452  // careful: need to temporarily adjust stdpScaleFactor to make this right
7453  if (wtANDwtChangeUpdateIntervalCnt_) {
7454  float storeScaleSTDP = stdpScaleFactor_;
7455  stdpScaleFactor_ = 1.0f/wtANDwtChangeUpdateIntervalCnt_;
7456 
7457  updateWeights();
7458 
7459  stdpScaleFactor_ = storeScaleSTDP;
7460  }
7461  }
7462 
7463  sim_in_testing = true;
7464 
7465  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
7466  if (!groupPartitionLists[netId].empty()) {
7467  networkConfigs[netId].sim_in_testing = true;
7468  updateNetworkConfig(netId); // update networkConfigRT struct (|TODO copy only a single boolean)
7469  }
7470  }
7471 }
7472 
7473 // exits testing phase
7474 void SNN::stopTesting() {
7475  sim_in_testing = false;
7476 
7477  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
7478  if (!groupPartitionLists[netId].empty()) {
7479  networkConfigs[netId].sim_in_testing = false;
7480  updateNetworkConfig(netId); // update networkConfigRT struct (|TODO copy only a single boolean)
7481  }
7482  }
7483 }
7484 
7485 void SNN::updateConnectionMonitor(short int connId) {
7486  for (int monId=0; monId<numConnectionMonitor; monId++) {
7487  if (connId==ALL || connMonCoreList[monId]->getConnectId()==connId) {
7488  int timeInterval = connMonCoreList[monId]->getUpdateTimeIntervalSec();
7489  if (timeInterval==1 || timeInterval>1 && (getSimTime()%timeInterval)==0) {
7490  // this ConnectionMonitor wants periodic recording
7491  connMonCoreList[monId]->writeConnectFileSnapshot(simTime,
7492  getWeightMatrix2D(connMonCoreList[monId]->getConnectId()));
7493  }
7494  }
7495  }
7496 }
7497 
7498 // FIXME: modify this for multi-GPUs
7499 std::vector< std::vector<float> > SNN::getWeightMatrix2D(short int connId) {
7500  assert(connId > ALL); // ALL == -1
7501  std::vector< std::vector<float> > wtConnId;
7502 
7503  int grpIdPre = connectConfigMap[connId].grpSrc;
7504  int grpIdPost = connectConfigMap[connId].grpDest;
7505 
7506  int netIdPost = groupConfigMDMap[grpIdPost].netId;
7507  int lGrpIdPost = groupConfigMDMap[grpIdPost].lGrpId;
7508 
7509  // init weight matrix with right dimensions
7510  for (int i = 0; i < groupConfigMap[grpIdPre].numN; i++) {
7511  std::vector<float> wtSlice;
7512  for (int j = 0; j < groupConfigMap[grpIdPost].numN; j++) {
7513  wtSlice.push_back(NAN);
7514  }
7515  wtConnId.push_back(wtSlice);
7516  }
7517 
7518  // copy the weights for a given post-group from device
7519  // \TODO: check if the weights for this grpIdPost have already been copied
7520  // \TODO: even better, but tricky because of ordering, make copyWeightState connection-based
7521 
7522  assert(grpIdPost > ALL); // ALL == -1
7523 
7524  // Note, copyWeightState() also copies pre-connections information (e.g., Npre, Npre_plastic, cumulativePre, and preSynapticIds)
7525  fetchWeightState(netIdPost, lGrpIdPost);
7526  fetchConnIdsLookupArray(netIdPost);
7527 
7528  for (int lNIdPost = groupConfigs[netIdPost][lGrpIdPost].lStartN; lNIdPost <= groupConfigs[netIdPost][lGrpIdPost].lEndN; lNIdPost++) {
7529  unsigned int pos_ij = managerRuntimeData.cumulativePre[lNIdPost];
7530  for (int i = 0; i < managerRuntimeData.Npre[lNIdPost]; i++, pos_ij++) {
7531  // skip synapses that belong to a different connection ID
7532  if (managerRuntimeData.connIdsPreIdx[pos_ij] != connId) //connInfo->connId)
7533  continue;
7534 
7535  // find pre-neuron ID and update ConnectionMonitor container
7536  int lNIdPre = GET_CONN_NEURON_ID(managerRuntimeData.preSynapticIds[pos_ij]);
7537  int lGrpIdPre = GET_CONN_GRP_ID(managerRuntimeData.preSynapticIds[pos_ij]);
7538  wtConnId[lNIdPre - groupConfigs[netIdPost][lGrpIdPre].lStartN][lNIdPost - groupConfigs[netIdPost][lGrpIdPost].lStartN] =
7539  fabs(managerRuntimeData.wt[pos_ij]);
7540  }
7541  }
7542 
7543  return wtConnId;
7544 }
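// --- Editor's illustrative sketch (not part of snn_manager.cpp) ---
// How a caller might consume the matrix returned above: entries are indexed
// [preNeuron][postNeuron] with 0-based ids relative to each group, hold the absolute
// synaptic weight, and are NAN where this connection has no synapse (assumes <cmath>;
// meanConnWeight is a hypothetical helper):
static float meanConnWeight(const std::vector< std::vector<float> >& wtMat) {
 double sum = 0.0;
 int cnt = 0;
 for (size_t i = 0; i < wtMat.size(); i++)
  for (size_t j = 0; j < wtMat[i].size(); j++)
   if (!std::isnan(wtMat[i][j])) { sum += wtMat[i][j]; cnt++; }
 return cnt ? (float)(sum / cnt) : 0.0f;
}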
7545 
7546 void SNN::updateGroupMonitor(int gGrpId) {
7547  // don't continue if no group monitors in the network
7548  if (!numGroupMonitor)
7549  return;
7550 
7551  if (gGrpId == ALL) {
7552  for (int gGrpId = 0; gGrpId < numGroups; gGrpId++)
7553  updateGroupMonitor(gGrpId);
7554  } else {
7555  int netId = groupConfigMDMap[gGrpId].netId;
7556  int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
7557  // update group monitor of a specific group
7558  // find index in group monitor arrays
7559  int monitorId = groupConfigMDMap[gGrpId].groupMonitorId;
7560 
7561  // don't continue if no group monitor enabled for this group
7562  if (monitorId < 0) return;
7563 
7564  // find last update time for this group
7565  GroupMonitorCore* grpMonObj = groupMonCoreList[monitorId];
7566  int lastUpdate = grpMonObj->getLastUpdated();
7567 
7568  // don't continue if time interval is zero (nothing to update)
7569  if (getSimTime() - lastUpdate <= 0)
7570  return;
7571 
7572  if (getSimTime() - lastUpdate > 1000)
7573  KERNEL_ERROR("updateGroupMonitor(grpId=%d) must be called at least once every second", gGrpId);
7574 
7575  // copy the group status (neuromodulators) to the manager runtime
7576  fetchGroupState(netId, lGrpId);
7577 
7578  // find the time interval in which to update group status
7579  // usually, we call updateGroupMonitor once every second, so the time interval is [0,1000)
7580  // however, updateGroupMonitor can be called at any time t \in [0,1000)... so we can have the cases
7581  // [0,t), [t,1000), and even [t1, t2)
7582  int numMsMin = lastUpdate % 1000; // lower bound is given by last time we called update
7583  int numMsMax = getSimTimeMs(); // upper bound is given by current time
7584  if (numMsMax == 0)
7585  numMsMax = 1000; // special case: full second
7586  assert(numMsMin < numMsMax);
7587 
7588  // current time is last completed second in milliseconds (plus t to be added below)
7589  // special case is after each completed second where !getSimTimeMs(): here we look 1s back
7590  int currentTimeSec = getSimTimeSec();
7591  if (!getSimTimeMs())
7592  currentTimeSec--;
7593 
7594  // save current time as last update time
7595  grpMonObj->setLastUpdated(getSimTime());
7596 
7597  // prepare fast access
7598  FILE* grpFileId = groupMonCoreList[monitorId]->getGroupFileId();
7599  bool writeGroupToFile = grpFileId != NULL;
7600  bool writeGroupToArray = grpMonObj->isRecording();
7601  //{ /featGroupMonitorFileWrite
7602  //float data;
7603  // LN20201003
7604  float data[4] = {.0f, .0f, .0f, .0f}; // DA, 5HT, ACh, NE --> either DA only or all 4, selected by a flag at monitor creation
7605  // -> written to the file header
7606  // rationale: most applications monitor DA only, but the OAT has a definitive need for all concentrations
7607  // if the full neuromodulator model ("Krichmar.full") is used, all neurotransmitters usually work in concert,
7608  // so all four are necessary => flag: DA only, or all 4 neurotransmitters?
7609  size_t nTransM = grpMonObj->isInAllMode()?4:1; // number of transmitters: FULL = 4, DA only = 1 --> affects init, mode, header
7610  //}
7611 
7612  // Read one piece of data at a time from the buffer and put the data into the appropriate monitor buffer. Later the user
7613  // may need to dump these group status data to an output file
7614  for(int t = numMsMin; t < numMsMax; t++) {
7615  // fetch group status data, support dopamine concentration currently
7616  data[0] = managerRuntimeData.grpDABuffer[lGrpId * 1000 + t];
7617  // prepared for full mode
7618  if(grpMonObj->isInAllMode()) {
7619 #define LN_FIX_GRP_ALL_BUFFER
7620  data[1] = managerRuntimeData.grp5HTBuffer[lGrpId * 1000 + t]; // ISSUE, does this work ? if no recording
7621  //printf("data[1]=%f\n", data[1]);
7622  data[2] = managerRuntimeData.grpAChBuffer[lGrpId * 1000 + t];
7623  //data[4] = managerRuntimeData.grpNEBuffer[lGrpId * 1000 + t]; // LN20201017 Ooopsi 1>c:\test\github\carlsim4\carlsim\kernel\src\snn_manager.cpp(6402): warning C4789: buffer 'data' of size 16 bytes will be overrun; 4 bytes will be written starting at offset 16
7624  data[3] = managerRuntimeData.grpNEBuffer[lGrpId * 1000 + t]; // LN20201017 Ooopsi 1>c:\test\github\carlsim4\carlsim\kernel\src\snn_manager.cpp(6402): warning C4789: buffer 'data' of size 16 bytes will be overrun; 4 bytes will be written starting at offset 16
7625  }
7626  // current time is last completed second plus whatever is leftover in t
7627  int time = currentTimeSec * 1000 + t;
7628 
7629  if (writeGroupToFile) {
7630  // TODO: write to group status file
7631  //{ /featGroupMonitorFileWrite LN20201003 --> to monitor DA concentration with OAT
7632  size_t cnt;
7633  cnt = fwrite(&time, sizeof(int), 1, grpFileId); assert(cnt==1);
7634  cnt = fwrite(&data, sizeof(float), nTransM, grpFileId); assert(cnt==nTransM); // DA[,HT,ACh,NE]
7635  //}
7636  }
7637 
7638  if (writeGroupToArray) {
7639  if(grpMonObj->isInAllMode())
7640  grpMonObj->pushData(time, data[0], data[1], data[2], data[3]);
7641  else
7642  grpMonObj->pushData(time, data[0]); // LN backward compatibility: DA only
7643  }
7644  }
7645 
7646  if (grpFileId!=NULL) // flush group status file
7647  fflush(grpFileId);
7648  }
7649 }
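// --- Editor's illustrative sketch (not part of snn_manager.cpp) ---
// The file-write loop above emits one record per millisecond: a 4-byte int timestamp
// followed by nTransM floats (DA only, or DA,5HT,ACh,NE in "all" mode). A reader for the
// data section could look like this (the file header written by GroupMonitorCore is not
// shown here; readGroupRecord is a hypothetical helper, assumes <cstdio>):
static bool readGroupRecord(FILE* fp, int& timeMs, float* data, size_t nTransM) {
 if (fread(&timeMs, sizeof(int), 1, fp) != 1) return false;
 return fread(data, sizeof(float), nTransM, fp) == nTransM;
}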
7650 
7651 // FIXME: wrong to use groupConfigs[0]
7652 void SNN::userDefinedSpikeGenerator(int gGrpId) {
7653  // \FIXME this function is a mess
7654  SpikeGeneratorCore* spikeGenFunc = groupConfigMap[gGrpId].spikeGenFunc;
7655  int netId = groupConfigMDMap[gGrpId].netId;
7656  int timeSlice = groupConfigMDMap[gGrpId].currTimeSlice;
7657  int currTime = simTime;
7658  bool done;
7659 
7660  fetchLastSpikeTime(netId);
7661 
7662  for(int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
7663  // start the time from the last time it spiked, that way we can ensure that the refractory period is maintained
7664  int lNId = gNId + groupConfigMDMap[gGrpId].GtoLOffset;
7665  int nextTime = managerRuntimeData.lastSpikeTime[lNId];
7666  if (nextTime == MAX_SIMULATION_TIME)
7667  nextTime = 0;
7668 
7669  // the end of the valid time window is either the length of the scheduling time slice from now (because that
7670  // is the max of the allowed propagated buffer size) or simply the end of the simulation
7671  int endOfTimeWindow = std::min<int>(currTime+timeSlice, simTimeRunStop);
7672 
7673  done = false;
7674  while (!done) {
7675  // generate the next spike time (nextSchedTime) from the nextSpikeTime callback
7676  int nextSchedTime = spikeGenFunc->nextSpikeTime(this, gGrpId, gNId - groupConfigMDMap[gGrpId].gStartN, currTime, nextTime, endOfTimeWindow);
7677 
7678  // the generated spike time is valid only if:
7679  // - it has not been scheduled before (nextSchedTime > nextTime)
7680  // - but careful: we would drop spikes at t=0, because we cannot initialize nextTime to -1...
7681  // - it is within the scheduling time slice (nextSchedTime < endOfTimeWindow)
7682  // - it is not in the past (nextSchedTime >= currTime)
7683  if ((nextSchedTime==0 || nextSchedTime>nextTime) && nextSchedTime<endOfTimeWindow && nextSchedTime>=currTime) {
7684 // fprintf(stderr,"%u: spike scheduled for %d at %u\n",currTime, i-groupConfigs[0][grpId].StartN,nextSchedTime);
7685  // scheduled spike...
7686  // \TODO CPU mode does not check whether the same AER event has been scheduled before (bug #212)
7687  // check how GPU mode does it, then do the same here.
7688  nextTime = nextSchedTime;
7689  spikeBuf->schedule(gNId, gGrpId, nextTime - currTime);
7690  } else {
7691  done = true;
7692  }
7693  }
7694  }
7695 }
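// --- Editor's illustrative sketch (not part of snn_manager.cpp) ---
// A callback honoring the scheduling contract enforced above: the returned time is only
// scheduled if it is later than the last scheduled spike (or 0), not in the past, and
// inside [currTime, endOfTimeWindow); anything else terminates the while-loop. Sketched
// against the core-level nextSpikeTime() signature; PeriodicSpikeGen is hypothetical.
class PeriodicSpikeGen {
public:
 explicit PeriodicSpikeGen(int isiMs) : isiMs_(isiMs) {}
 int nextSpikeTime(SNN* snn, int grpId, int nid, int currentTime,
  int lastScheduledSpikeTime, int endOfTimeSlice) {
  // schedule one spike every isiMs_ ms after the last scheduled spike
  int next = std::max(currentTime, lastScheduledSpikeTime + isiMs_);
  return next; // dropped by the caller if next >= endOfTimeSlice
 }
private:
 int isiMs_;
};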
7696 
7697 void SNN::generateUserDefinedSpikes() {
7698  for(int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
7699  if (groupConfigMap[gGrpId].isSpikeGenerator) {
7700  // This evaluation is done to check if it's time to get a new set of spikes..
7701  // check whether simTime has advanced more than the current time slice, in which case we need to schedule
7702  // spikes for the next time slice
7703  // we always have to run this the first millisecond of a new runNetwork call; that is,
7704  // when simTime==simTimeRunStart
7705  if(((simTime - groupConfigMDMap[gGrpId].sliceUpdateTime) >= groupConfigMDMap[gGrpId].currTimeSlice || simTime == simTimeRunStart)) {
7706  int timeSlice = groupConfigMDMap[gGrpId].currTimeSlice;
7707  groupConfigMDMap[gGrpId].sliceUpdateTime = simTime;
7708 
7709  // we don't generate any Poisson spikes if during the
7710  // current call we might exceed the maximum 32 bit integer value
7711  if ((simTime + timeSlice) == MAX_SIMULATION_TIME || (simTime + timeSlice) < 0)
7712  return;
7713 
7714  if (groupConfigMap[gGrpId].spikeGenFunc != NULL) {
7715  userDefinedSpikeGenerator(gGrpId);
7716  }
7717  }
7718  }
7719  }
7720 }
7721 
7727 void SNN::allocateManagerSpikeTables() {
7728  managerRuntimeData.firingTableD2 = new int[managerRTDSize.maxMaxSpikeD2];
7729  managerRuntimeData.firingTableD1 = new int[managerRTDSize.maxMaxSpikeD1];
7730 
7731 #ifdef LN_AXON_PLAST
7732  managerRuntimeData.firingTimesD2 = new unsigned int[managerRTDSize.maxMaxSpikeD2];
7733 #endif
7734 
7735  managerRuntimeData.extFiringTableEndIdxD2 = new int[managerRTDSize.maxNumGroups];
7736  managerRuntimeData.extFiringTableEndIdxD1 = new int[managerRTDSize.maxNumGroups];
7737  managerRuntimeData.extFiringTableD2 = new int*[managerRTDSize.maxNumGroups];
7738  managerRuntimeData.extFiringTableD1 = new int*[managerRTDSize.maxNumGroups];
7739 
7740  //CUDA_CHECK_ERRORS(cudaMallocHost(&managerRuntimeData.firingTableD2, sizeof(int) * managerRTDSize.maxMaxSpikeD2));
7741  //CUDA_CHECK_ERRORS(cudaMallocHost(&managerRuntimeData.firingTableD1, sizeof(int) * managerRTDSize.maxMaxSpikeD1));
7742  //CUDA_CHECK_ERRORS(cudaMallocHost(&managerRuntimeData.extFiringTableEndIdxD2, sizeof(int) * managerRTDSize.maxNumGroups));
7743  //CUDA_CHECK_ERRORS(cudaMallocHost(&managerRuntimeData.extFiringTableEndIdxD1, sizeof(int) * managerRTDSize.maxNumGroups));
7744  //CUDA_CHECK_ERRORS(cudaMallocHost(&managerRuntimeData.extFiringTableD2, sizeof(int*) * managerRTDSize.maxNumGroups));
7745  //CUDA_CHECK_ERRORS(cudaMallocHost(&managerRuntimeData.extFiringTableD1, sizeof(int*) * managerRTDSize.maxNumGroups));
7746  resetFiringTable();
7747 
7748  managerRuntimeData.timeTableD2 = new unsigned int[TIMING_COUNT];
7749  managerRuntimeData.timeTableD1 = new unsigned int[TIMING_COUNT];
7750  resetTimeTable();
7751 }
7752 
7753 // updates simTime, returns true when new second started
7754 bool SNN::updateTime() {
7755  bool finishedOneSec = false;
7756 
7757  // done one second worth of simulation
7758  // update relevant parameters...now
7759  if(++simTimeMs == 1000) {
7760  simTimeMs = 0;
7761  simTimeSec++;
7762  finishedOneSec = true;
7763  }
7764 
7765  simTime++;
7766  if(simTime == MAX_SIMULATION_TIME || simTime < 0){
7767  // reached the maximum limit of the simulation time using 32 bit value...
7768  KERNEL_WARN("Maximum Simulation Time Reached...Resetting simulation time");
7769  }
7770 
7771  return finishedOneSec;
7772 }
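// --- Editor's illustrative sketch (not part of snn_manager.cpp) ---
// The same clock rollover in isolation: simTime counts total milliseconds, simTimeMs
// wraps at 1000, and simTimeSec counts completed seconds (types simplified here):
static void tickClock(int& ms, int& sec, int& total) {
 if (++ms == 1000) { ms = 0; sec++; }
 total++;
}
// e.g., after 1500 ticks starting from zero: total == 1500, sec == 1, ms == 500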
7773 
7774 #ifdef LN_UPDATE_CURSPIKES
7775 //#define DEBUG_UPDATE_CURSPIKES
7776 // LN20201101 mock to test fetchCurSpikes
7777 void SNN::updateCurSpike(std::vector<bool>& gFiring, int netId) {
7778 
7779  if (netId == ALL) {
7780  for (int id = 0; id < MAX_NET_PER_SNN; id ++)
7781  if (!groupPartitionLists[id].empty())
7782  updateCurSpike(gFiring, id);
7783  }
7784  else {
7785  // translate CurSpike of regular neurons into firing
7786  int regLength = networkConfigs[netId].numNReg;
7787  assert(gFiring.size() >= regLength);
7788 
7789  int genLength = networkConfigs[netId].numNSpikeGen;
7790  assert(gFiring.size() >= regLength + genLength);
7791 
7792  int nPois = networkConfigs[netId].numNPois;
7793  int poisLength = nPois - genLength;
7794  assert(gFiring.size() >= regLength + poisLength);
7795 
7796  assert(networkConfigs[netId].numN >= regLength + genLength + poisLength); // lif
7797 
7798  int rateGenLength = networkConfigs[netId].numNRateGen;
7799 
7800  int length = networkConfigs[netId].numN;
7801  std::vector<bool> lFiring(length, false);
7802 
7803 #ifdef DEBUG_UPDATE_CURSPIKES
7804  printf("numN: %d numNReg:%d numNPois:%d numNSpikeGen:%d numNPoisGen:%d\n",
7805  length, regLength, poisLength, genLength, rateGenLength);
7806 #endif
7807 
7808  // copy the neuron firing information to the manager runtime
7809  fetchCurSpike(netId);
7810 
7811  // copy Poisson neuron firing to the manager runtime
7812  if (poisLength > 0) {
7813  fetchRandNum(netId);
7814  fetchPoissonFireRate(netId);
7815  }
7816 
7817  // copy SpikeGenerator neuron firing to the manager runtime
7818  if (genLength > 0) {
7819  fetchSpikeGenBits(netId);
7820  }
7821 
7822 
7823  if (genLength > 0) {
7824 #ifdef DEBUG_UPDATE_CURSPIKES
7825  printf("SpikGen: %d, Spikes (t=%01d.%03ds): ", regLength, simTimeMs / 1000, simTimeMs); // gen is captured before the step
7826 #endif
7827  for (int lNId = regLength; lNId < genLength + regLength; lNId++) {
7828  const int nIdPos = lNId - regLength;
7829  //\sa getSpikeGenBit (crashes on GPU)
7830  const int nIdBitPos = nIdPos % 32;
7831  const int nIdIndex = nIdPos / 32;
7832  bool fired = ((managerRuntimeData.spikeGenBits[nIdIndex] >> nIdBitPos) & 0x1);
7833  lFiring[lNId] = fired;
7834 #ifdef DEBUG_UPDATE_CURSPIKES
7835  printf("%s", fired ? "^" : "-");
7836 #endif
7837  }
7838 #ifdef DEBUG_UPDATE_CURSPIKES
7839  printf("\n");
7840 #endif
7841  }
7842 
7843  if (poisLength > 0) {
7844 #ifdef DEBUG_UPDATE_CURSPIKES
7845  printf("Poisson: %d, Spikes (t=%01d.%03ds): ", regLength, simTimeMs / 1000, simTimeMs);
7846 #endif
7847  for (int lNId = regLength + genLength; lNId < regLength + genLength + poisLength; lNId++) {
7848  //\sa getPoissonSpike (crashes on GPU)
7849  bool fired = managerRuntimeData.randNum[lNId - regLength] * 1000.0f
7850  < managerRuntimeData.poissonFireRate[lNId - regLength];
7851  lFiring[lNId] = fired;
7852 #ifdef DEBUG_UPDATE_CURSPIKES
7853  printf("%s", fired ? "^" : "-");
7854 #endif
7855  }
7856 #ifdef DEBUG_UPDATE_CURSPIKES
7857  printf("\n");
7858 #endif
7859  }
7860 
7861 #ifdef DEBUG_UPDATE_CURSPIKES
7862  printf("Regular: %d, Spikes (t=%01d.%03ds): ", regLength, simTimeMs / 1000, simTimeMs);
7863 #endif
7864 
7865  for (int lNId = 0; lNId < regLength; lNId++) {
7866  bool fired = managerRuntimeData.curSpike[lNId];
7867  lFiring[lNId] = fired;
7868 #ifdef DEBUG_UPDATE_CURSPIKES
7869  printf("%s", fired ? "^" : "-");
7870 #endif
7871  }
7872 #ifdef DEBUG_UPDATE_CURSPIKES
7873  printf("\n");
7874 #endif
7875 
7876  // translate local to global
7877  //std::map<int, GroupConfigMD>
7878  for (auto iter = groupConfigMDMap.begin(); iter != groupConfigMDMap.end(); iter++) {
7879  int gGrpId = iter->first;
7880  GroupConfigMD &groupConfig = iter->second;
7881  if (groupConfig.netId == netId) {
7882  for (int lNId = groupConfig.lStartN, gNId = groupConfig.gStartN;
7883  lNId <= groupConfig.lEndN && gNId <= groupConfig.gEndN;
7884  lNId++, gNId++)
7885  {
7886  assert(gNId == lNId + groupConfig.LtoGOffset);
7887  assert(lNId == gNId + groupConfig.GtoLOffset);
7888  gFiring[gNId] = lFiring[lNId];
7889  }
7890  }
7891  }
7892 
7893  }
7894 }
7895 #endif
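// --- Editor's illustrative sketch (not part of snn_manager.cpp) ---
// The spikeGenBits packing used above (and by the setter near the top of this section):
// one bit per spike-generator neuron, 32 neurons per unsigned int word.
static inline void setGenBit(unsigned int* spikeGenBits, int nIdPos) {
 spikeGenBits[nIdPos / 32] |= (1u << (nIdPos % 32));
}
static inline bool testGenBit(const unsigned int* spikeGenBits, int nIdPos) {
 return ((spikeGenBits[nIdPos / 32] >> (nIdPos % 32)) & 0x1) != 0;
}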
7896 
7897 #ifdef LN_UPDATE_CURSPIKES_MT
7898 //#define DEBUG_UPDATE_CURSPIKES_MT
7899 //#define DEBUG_UPDATE_CURSPIKES_MT_TIMING
7900 //#define DEBUG_UPDATE_CURSPIKES_MT_MAP_TIMING
7901 //#define DEBUG_UPDATE_CURSPIKES_MT_MAP_SEQ
7902 #include <thread>
7903 #include <chrono>
7904 //#define DEBUG_UPDATE_CURSPIKES
7905 // LN20201101 mock to test fetchCurSpikes
7906 // https://en.cppreference.com/w/cpp/language/lambda
7907 // https://en.cppreference.com/w/cpp/thread/thread/join
7908 void SNN::updateCurSpikeMT(std::vector<bool>& gFiring, int netId) {
7909 
7910  // prototype does not support -1 see Single Threaded solution
7911 
7912  //if (netId == ALL) {
7913  // for (int id = 0; id < MAX_NET_PER_SNN; id++)
7914  // if (!groupPartitionLists[id].empty())
7915  // updateCurSpike(gFiring, id);
7916  //}
7917  //else {
7918 
7919 
7920  // translate CurSpike of regular neurons into firing
7921  int regLength = networkConfigs[netId].numNReg;
7922  assert(gFiring.size() >= regLength);
7923 
7924  int genLength = networkConfigs[netId].numNSpikeGen;
7925  assert(gFiring.size() >= regLength + genLength);
7926 
7927  int nPois = networkConfigs[netId].numNPois;
7928  int poisLength = nPois - genLength;
7929  assert(gFiring.size() >= regLength + poisLength);
7930 
7931  assert(networkConfigs[netId].numN >= regLength + genLength + poisLength); // lif
7932 
7933  int rateGenLength = networkConfigs[netId].numNRateGen;
7934 
7935  int length = networkConfigs[netId].numN;
7936  std::vector<bool> lFiring(length, false);
7937 
7938 #ifdef DEBUG_UPDATE_CURSPIKES_MT
7939  printf("numN: %d numNReg:%d numNPois:%d numNSpikeGen:%d numNPoisGen:%d\n",
7940  length, regLength, poisLength, genLength, rateGenLength);
7941 #endif
7942 
7943  // copy the neuron firing information to the manager runtime
7944  fetchCurSpike(netId);
7945 
7946  // copy Poisson neuron firing to the manager runtime
7947  if (poisLength > 0) {
7948  fetchRandNum(netId);
7949  fetchPoissonFireRate(netId);
7950  }
7951 
7952  // copy SpikeGenerator neuron firing to the manager runtime
7953  if (genLength > 0) {
7954  fetchSpikeGenBits(netId);
7955  }
7956 
7957  std::vector<std::thread> gen_workers(genLength); // avoid less performant push_back
7958  std::vector<std::thread> pois_workers(poisLength); // avoid less performant push_back
7959  std::vector<std::thread> reg_workers(regLength); // avoid less performant push_back
7960 
7961  if (genLength > 0) {
7962  for (int lNId = regLength; lNId < genLength + regLength; lNId++) {
7963  auto worker = [&, lNId]() { // capture all by reference, except lNId by copy
7964  const int nIdPos = lNId - regLength;
7965  //\sa getSpikeGenBit (crashes on GPU)
7966  const int nIdBitPos = nIdPos % 32;
7967  const int nIdIndex = nIdPos / 32;
7968  bool fired = ((managerRuntimeData.spikeGenBits[nIdIndex] >> nIdBitPos) & 0x1);
7969 #ifdef DEBUG_UPDATE_CURSPIKES_MT_TIMING
7970  std::this_thread::sleep_for(std::chrono::seconds(2));
7971 #endif
7972  lFiring[lNId] = fired;
7973  };
7974  gen_workers[lNId-regLength] = std::thread(worker);
7975  }
7976  assert(gen_workers.size() == genLength);
7977  }
7978 
7979  if (poisLength>0) {
7980  for (int lNId = regLength + genLength; lNId<regLength + genLength + poisLength; lNId++) {
7981  auto worker = [&, lNId]() {
7982  //\sa getPoissonSpike (crashes on GPU)
7983  bool fired = managerRuntimeData.randNum[lNId - regLength] * 1000.0f
7984  < managerRuntimeData.poissonFireRate[lNId - regLength];
7985 #ifdef DEBUG_UPDATE_CURSPIKES_MT_TIMING
7986  std::this_thread::sleep_for(std::chrono::seconds(8));
7987 #endif
7988  lFiring[lNId] = fired;
7989  };
7990  pois_workers[lNId - regLength - genLength] = std::thread(worker);
7991  }
7992  assert(pois_workers.size() == poisLength);
7993  }
7994 
7995  if (regLength > 0) {
7996  for (int lNId = 0; lNId < regLength; lNId++) {
7997  auto worker = [&, lNId]() {
7998  bool fired = managerRuntimeData.curSpike[lNId];
7999 #ifdef DEBUG_UPDATE_CURSPIKES_MT_TIMING
8000  std::this_thread::sleep_for(std::chrono::seconds(10));
8001 #endif
8002  lFiring[lNId] = fired;
8003  };
8004  reg_workers[lNId] = std::thread(worker);
8005  }
8006  assert(reg_workers.size() == regLength);
8007  }
8008 
8009  // later
8010  for (int lNId = regLength; lNId < genLength + regLength; lNId++)
8011  gen_workers[lNId-regLength].join();
8012 
8013  for (int lNId = regLength + genLength; lNId<regLength + genLength + poisLength; lNId++)
8014  pois_workers[lNId - regLength - genLength].join();
8015 
8016  for (int lNId = 0; lNId<regLength; lNId++)
8017  reg_workers[lNId].join();
8018 
8019 
8020  // after join later
8021 #ifdef DEBUG_UPDATE_CURSPIKES_MT
8022  if (genLength > 0) {
8023  printf("SpikGen: %d, Spikes (t=%01d.%03ds): ", regLength, simTimeMs / 1000, simTimeMs); // gen is captured before the step
8024  for (int lNId = regLength; lNId < genLength + regLength; lNId++)
8025  printf("%s", lFiring[lNId] ? "^" : "-");
8026  printf("\n");
8027  }
8028  if (poisLength > 0) {
8029  printf("Poisson: %d, Spikes (t=%01d.%03ds): ", regLength, simTimeMs / 1000, simTimeMs);
8030  for (int lNId = regLength + genLength; lNId < regLength + genLength + poisLength; lNId++)
8031  printf("%s", lFiring[lNId] ? "^" : "-");
8032  printf("\n");
8033  }
8034  if (regLength > 0) {
8035  printf("Regular: %d, Spikes (t=%01d.%03ds): ", regLength, simTimeMs / 1000, simTimeMs);
8036  for (int lNId = 0; lNId < regLength; lNId++)
8037  printf("%s", lFiring[lNId] ? "^" : "-");
8038  printf("\n");
8039  }
8040 #endif
8041 
8042 
8043  // translate local to global
8044  int threadIdx = 0;
8045  std::vector<std::thread> map_workers(groupConfigMDMap.size()); // avoid less performant push_back
8046  for (auto iter = groupConfigMDMap.begin(); iter != groupConfigMDMap.end(); iter++) {
8047  auto worker = [&, iter]() { // copy iter and therefore the reference to groupConfig
8048  int gGrpId = iter->first;
8049  GroupConfigMD &groupConfig = iter->second; // ref groupConfig
8050  if (groupConfig.netId == netId) {
8051  for (int lNId = groupConfig.lStartN, gNId = groupConfig.gStartN;
8052  lNId <= groupConfig.lEndN && gNId <= groupConfig.gEndN;
8053  lNId++, gNId++)
8054  {
8055  assert(gNId == lNId + groupConfig.LtoGOffset);
8056  assert(lNId == gNId + groupConfig.GtoLOffset);
8057 #ifdef DEBUG_UPDATE_CURSPIKES_MT_MAP_TIMING
8058  //std::this_thread::sleep_for(std::chrono::seconds(1)); // pretty deterministic randomness
8059  std::this_thread::sleep_for(std::chrono::milliseconds(lNId)); // pretty deterministic randomness
8060 #endif
8061  gFiring[gNId] = lFiring[lNId];
8062  }
8063  }
8064  };
8065 #ifdef DEBUG_UPDATE_CURSPIKES_MT_MAP_SEQ
8066  worker(); // sequential: sum over all groups * 1s = 12s reg
8067 #else
8068  map_workers[threadIdx++] = std::thread(worker); // parallel: 1s * largest group = 4s
8069 #endif
8070  }
8071 #ifndef DEBUG_UPDATE_CURSPIKES_MT_MAP_SEQ
8072  assert(threadIdx == groupConfigMDMap.size()); // hint: comment out to run sequentially
8073  for(threadIdx=0; threadIdx<map_workers.size(); threadIdx++)
8074  map_workers[threadIdx].join();
8075 #endif
8076 
8077 }
8078 
8079 #endif //CURSPIKES_MT
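// --- Editor's illustrative sketch (not part of snn_manager.cpp) ---
// The worker lambdas above capture everything by reference except the loop index
// ([&, lNId]), so each thread keeps its own neuron id after the loop moves on. Spawning
// one std::thread per neuron is a prototype-level choice; a chunked alternative (names
// hypothetical, output as bool* to avoid std::vector<bool> bit-packing races, assumes
// <thread> is available) could look like this:
static void fillChunked(bool* lFiring, const bool* curSpike, int regLength, int numThreads) {
 std::vector<std::thread> workers;
 for (int t = 0; t < numThreads; t++) {
  int begin = t * regLength / numThreads;
  int end = (t + 1) * regLength / numThreads;
  workers.emplace_back([=]() { // begin/end captured by value per thread
   for (int lNId = begin; lNId < end; lNId++)
    lFiring[lNId] = curSpike[lNId];
  });
 }
 for (size_t i = 0; i < workers.size(); i++) workers[i].join();
}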
8080 
8081 
8082 #ifdef LN_AXON_PLAST
8083 void SNN::findWavefrontPath(std::vector<int>& path, std::vector<float>& eligibility, int netId, int grpId, int startNId, int goalNId) {
8084  if (netId < CPU_RUNTIME_BASE)
8085  ; // GPU runtime
8086  else {
8087  findWavefrontPath_CPU(path, eligibility, netId, grpId, startNId, goalNId);
8088  }
8089 
8090 }
8091 
8092 
8093 bool SNN::updateDelays(int gGrpIdPre, int gGrpIdPost, std::vector<std::tuple<int, int, uint8_t>> connDelays) {
8094  int netIdPre = groupConfigMDMap[gGrpIdPre].netId;
8095  int netIdPost = groupConfigMDMap[gGrpIdPost].netId;
8096  assert(netIdPre == netIdPost); // KERNEL Error
8097  int netId = netIdPre;
8098  int lGrpIdPost = groupConfigMDMap[gGrpIdPost].lGrpId;
8099  int lGrpIdPre = -1; // SNN::getDelays
8100  for (int lGrpId = 0; lGrpId < networkConfigs[netIdPost].numGroupsAssigned; lGrpId++)
8101  if (groupConfigs[netIdPost][lGrpId].gGrpId == gGrpIdPre) {
8102  lGrpIdPre = lGrpId;
8103  break;
8104  }
8105  assert(lGrpIdPre != -1);
8106 
8107  bool success = false;
8108  if (netId < CPU_RUNTIME_BASE) // GPU runtime
8109  success = updateDelays_GPU(netId, lGrpIdPre, lGrpIdPost, connDelays);
8110  else {
8111  success = updateDelays_CPU(netId, lGrpIdPre, lGrpIdPost, connDelays);
8112  }
8113  return success;
8114 }
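// --- Editor's illustrative sketch (not part of snn_manager.cpp) ---
// Building the connDelays argument used above: each tuple carries a pre-neuron id, a
// post-neuron id, and the new axonal delay in ms (uint8_t). How the ids are interpreted
// (group-relative vs. global) is defined by updateDelays_CPU/updateDelays_GPU, which are
// not shown here; makeConnDelays and the values below are hypothetical.
static std::vector<std::tuple<int, int, uint8_t> > makeConnDelays() {
 std::vector<std::tuple<int, int, uint8_t> > connDelays;
 connDelays.push_back(std::make_tuple(0, 3, (uint8_t)5)); // pre 0 -> post 3, 5 ms
 return connDelays;
}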
8115 
8116 
8117 void SNN::printEntrails(char* buffer, unsigned length, int gGrpIdPre, int gGrpIdPost) {
8118 
8119  int netIdPre = groupConfigMDMap[gGrpIdPre].netId;
8120  int netIdPost = groupConfigMDMap[gGrpIdPost].netId;
8121  assert(netIdPre == netIdPost); // KERNEL Error
8122  int netId = netIdPre;
8123  int lGrpIdPost = groupConfigMDMap[gGrpIdPost].lGrpId;
8124  int lGrpIdPre = -1; // SNN::getDelays
8125  for (int lGrpId = 0; lGrpId < networkConfigs[netIdPost].numGroupsAssigned; lGrpId++)
8126  if (groupConfigs[netIdPost][lGrpId].gGrpId == gGrpIdPre) {
8127  lGrpIdPre = lGrpId;
8128  break;
8129  }
8130  assert(lGrpIdPre != -1);
8131 
8132  if (netId < CPU_RUNTIME_BASE) // GPU runtime
8133  printEntrails_GPU(buffer, length, netId, lGrpIdPre, lGrpIdPost);
8134  else {
8135  //#ifdef __NO_PTHREADS__
8136  printEntrails_CPU(buffer, length, netId, lGrpIdPre, lGrpIdPost);
8137  }
8138 
8139 }
8140 
8141 
8142 #endif
8143 
8144 // FIXME: modify this for multi-GPUs
8145 void SNN::updateSpikeMonitor(int gGrpId) {
8146  // don't continue if no spike monitors in the network
8147  if (!numSpikeMonitor)
8148  return;
8149 
8150  if (gGrpId == ALL) {
8151  for (int gGrpId = 0; gGrpId < numGroups; gGrpId++)
8152  updateSpikeMonitor(gGrpId);
8153  } else {
8154  int netId = groupConfigMDMap[gGrpId].netId;
8155  int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
8156  // update spike monitor of a specific group
8157  // find index in spike monitor arrays
8158  int monitorId = groupConfigMDMap[gGrpId].spikeMonitorId;
8159 
8160  // don't continue if no spike monitor enabled for this group
8161  if (monitorId < 0) return;
8162 
8163  // find last update time for this group
8164  SpikeMonitorCore* spkMonObj = spikeMonCoreList[monitorId];
8165  long int lastUpdate = spkMonObj->getLastUpdated();
8166 
8167  // don't continue if time interval is zero (nothing to update)
8168  if ( ((long int)getSimTime()) - lastUpdate <= 0)
8169  return;
8170 
8171  if ( ((long int)getSimTime()) - lastUpdate > 1000)
8172  KERNEL_ERROR("updateSpikeMonitor(grpId=%d) must be called at least once every second",gGrpId);
8173 
8174  // AER buffer max size warning here.
8175  // Because of C++ short-circuit evaluation, the last condition should not be evaluated
8176  // if the previous conditions are false.
8177  if (spkMonObj->getAccumTime() > LONG_SPIKE_MON_DURATION \
8178  && this->getGroupNumNeurons(gGrpId) > LARGE_SPIKE_MON_GRP_SIZE \
8179  && spkMonObj->isBufferBig()){
8180  // change this warning message to correct message
8181  KERNEL_WARN("updateSpikeMonitor(grpId=%d) is becoming very large. (>%lu MB)",gGrpId,(long int) MAX_SPIKE_MON_BUFFER_SIZE/1024 );// make this better
8182  KERNEL_WARN("Reduce the cumulative recording time (currently %lu minutes) or the group size (currently %d) to avoid this.",spkMonObj->getAccumTime()/(1000*60),this->getGroupNumNeurons(gGrpId));
8183  }
8184 
8185  // copy the neuron firing information to the manager runtime
8186  fetchSpikeTables(netId);
8187  fetchGrpIdsLookupArray(netId);
8188 
8189  // find the time interval in which to update spikes
8190  // usually, we call updateSpikeMonitor once every second, so the time interval is [0,1000)
8191  // however, updateSpikeMonitor can be called at any time t \in [0,1000)... so we can have the cases
8192  // [0,t), [t,1000), and even [t1, t2)
8193  int numMsMin = lastUpdate % 1000; // lower bound is given by last time we called update
8194  int numMsMax = getSimTimeMs(); // upper bound is given by current time
8195  if (numMsMax == 0)
8196  numMsMax = 1000; // special case: full second
8197  assert(numMsMin < numMsMax);
8198 
8199  // current time is last completed second in milliseconds (plus t to be added below)
8200  // special case is after each completed second where !getSimTimeMs(): here we look 1s back
8201  int currentTimeSec = getSimTimeSec();
8202  if (!getSimTimeMs())
8203  currentTimeSec--;
8204 
8205  // save current time as last update time
8206  spkMonObj->setLastUpdated( (long int)getSimTime() );
8207 
8208  // prepare fast access
8209  FILE* spkFileId = spikeMonCoreList[monitorId]->getSpikeFileId();
8210  bool writeSpikesToFile = spkFileId != NULL;
8211  bool writeSpikesToArray = spkMonObj->getMode()==AER && spkMonObj->isRecording();
8212 
8213  // Read one spike at a time from the buffer and put the spikes into the appropriate monitor buffer. Later the user
8214  // may need to dump these spikes to an output file
8215  for (int k = 0; k < 2; k++) {
8216  unsigned int* timeTablePtr = (k == 0) ? managerRuntimeData.timeTableD2 : managerRuntimeData.timeTableD1;
8217  int* fireTablePtr = (k == 0) ? managerRuntimeData.firingTableD2 : managerRuntimeData.firingTableD1;
8218  for(int t = numMsMin; t < numMsMax; t++) {
8219  for(auto i = timeTablePtr[t + glbNetworkConfig.maxDelay]; i < timeTablePtr[t + glbNetworkConfig.maxDelay + 1]; i++) {
8220  // retrieve the neuron id
8221  int lNId = fireTablePtr[i];
8222 
8223  // make sure neuron belongs to currently relevant group
8224  int this_grpId = managerRuntimeData.grpIds[lNId];
8225  if (this_grpId != lGrpId)
8226  continue;
8227 
8228  // adjust nid to be 0-indexed for each group
8229  // this way, if a group has 10 neurons, their IDs in the spike file and spike monitor will be
8230  // indexed from 0..9, no matter what their real nid is
8231  int nId = lNId - groupConfigs[netId][lGrpId].lStartN;
8232  assert(nId >= 0);
8233 
8234  // current time is last completed second plus whatever is leftover in t
8235  int time = currentTimeSec * 1000 + t;
8236 
8237  if (writeSpikesToFile) {
8238  int cnt;
8239  cnt = fwrite(&time, sizeof(int), 1, spkFileId); assert(cnt==1);
8240  cnt = fwrite(&nId, sizeof(int), 1, spkFileId); assert(cnt==1);
8241  }
8242 
8243  if (writeSpikesToArray) {
8244  spkMonObj->pushAER(time, nId);
8245  }
8246  }
8247  }
8248  }
8249 
8250  if (spkFileId!=NULL) // flush spike file
8251  fflush(spkFileId);
8252  }
8253 }
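// --- Editor's illustrative sketch (not part of snn_manager.cpp) ---
// Each spike record written above is a pair of 4-byte ints: time in ms and the 0-based
// neuron id within the group. A reader for the data section could look like this (the
// header written by SpikeMonitorCore is not shown; readSpikeRecord is hypothetical):
static bool readSpikeRecord(FILE* fp, int& timeMs, int& neurId) {
 return fread(&timeMs, sizeof(int), 1, fp) == 1 &&
  fread(&neurId, sizeof(int), 1, fp) == 1;
}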
8254 
8255 // FIXME: modify this for multi-GPUs
8256 void SNN::updateNeuronMonitor(int gGrpId) {
8257  // don't continue if no neuron monitors in the network
8258  if (!numNeuronMonitor)
8259  return;
8260 
8261  //printf("The global group id is: %i\n", gGrpId);
8262 
8263  if (gGrpId == ALL) {
8264  for (int gGrpId = 0; gGrpId < numGroups; gGrpId++)
8265  updateNeuronMonitor(gGrpId);
8266  }
8267  else {
8268  //printf("UpdateNeuronMonitor is being executed!\n");
8269  int netId = groupConfigMDMap[gGrpId].netId;
8270  int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
8271  // update neuron monitor of a specific group
8272  // find index in neuron monitor arrays
8273  int monitorId = groupConfigMDMap[gGrpId].neuronMonitorId;
8274 
8275  // don't continue if no spike monitor enabled for this group
8276  if (monitorId < 0) return;
8277 
8278  // find last update time for this group
8279  NeuronMonitorCore* nrnMonObj = neuronMonCoreList[monitorId];
8280  long int lastUpdate = nrnMonObj->getLastUpdated();
8281 
8282  // don't continue if time interval is zero (nothing to update)
8283  if (((long int)getSimTime()) - lastUpdate <= 0)
8284  return;
8285 
8286  if (((long int)getSimTime()) - lastUpdate > 1000)
8287  KERNEL_ERROR("updateNeuronMonitor(grpId=%d) must be called at least once every second", gGrpId);
8288 
8289  // AER buffer max size warning here.
8290  // Because of C++ short-circuit evaluation, the last condition should not be evaluated
8291  // if the previous conditions are false.
8292 
8293  /*if (nrnMonObj->getAccumTime() > LONG_NEURON_MON_DURATION \
8294  && this->getGroupNumNeurons(gGrpId) > LARGE_NEURON_MON_GRP_SIZE \
8295  && nrnMonObj->isBufferBig()) {
8296  // change this warning message to correct message
8297  KERNEL_WARN("updateNeuronMonitor(grpId=%d) is becoming very large. (>%lu MB)", gGrpId, (long int)MAX_NEURON_MON_BUFFER_SIZE / 1024);// make this better
8298  KERNEL_WARN("Reduce the cumulative recording time (currently %lu minutes) or the group size (currently %d) to avoid this.", nrnMonObj->getAccumTime() / (1000 * 60), this->getGroupNumNeurons(gGrpId));
8299  }*/
8300 
8301  // copy the neuron information to manager runtime
8302  fetchNeuronStateBuffer(netId, lGrpId);
8303 
8304  // find the time interval in which to update neuron state info
8305  // usually, we call updateNeuronMonitor once every second, so the time interval is [0,1000)
8306  // however, updateNeuronMonitor can be called at any time t \in [0,1000)... so we can have the cases
8307  // [0,t), [t,1000), and even [t1, t2)
8308  int numMsMin = lastUpdate % 1000; // lower bound is given by last time we called update
8309  int numMsMax = getSimTimeMs(); // upper bound is given by current time
8310  if (numMsMax == 0)
8311  numMsMax = 1000; // special case: full second
8312  assert(numMsMin < numMsMax);
8313  //KERNEL_INFO("lastUpdate: %d -- numMsMin: %d -- numMsMax: %d", lastUpdate, numMsMin, numMsMax);
8314 
8315  // current time is last completed second in milliseconds (plus t to be added below)
8316  // special case is after each completed second where !getSimTimeMs(): here we look 1s back
8317  int currentTimeSec = getSimTimeSec();
8318  if (!getSimTimeMs())
8319  currentTimeSec--;
8320 
8321  // save current time as last update time
8322  nrnMonObj->setLastUpdated((long int)getSimTime());
8323 
8324  // prepare fast access
8325  FILE* nrnFileId = neuronMonCoreList[monitorId]->getNeuronFileId();
8326  bool writeNeuronStateToFile = nrnFileId != NULL;
8327  bool writeNeuronStateToArray = nrnMonObj->isRecording();
8328 
8329  // Read one neuron state value at a time from the buffer and put the neuron state values into the appropriate monitor buffer.
8330  // Later the user may need to dump these neuron state values to an output file
8331  //printf("The numMsMin is: %i; and numMsMax is: %i\n", numMsMin, numMsMax);
8332  for (int t = numMsMin; t < numMsMax; t++) {
8333  int grpNumNeurons = groupConfigs[netId][lGrpId].lEndN - groupConfigs[netId][lGrpId].lStartN + 1;
8334  //printf("The lStartN is: %i; and lEndN is: %i\n", groupConfigs[netId][lGrpId].lStartN, groupConfigs[netId][lGrpId].lEndN);
8335  // for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++) {
8336  for (int tmpNId = 0; tmpNId < std::min(MAX_NEURON_MON_GRP_SZIE, grpNumNeurons); tmpNId++) {
8337  int lNId = groupConfigs[netId][lGrpId].lStartN + tmpNId;
8338  float v, u, I;
8339 
8340  // make sure neuron belongs to currently relevant group
8341  int this_grpId = managerRuntimeData.grpIds[lNId];
8342  if (this_grpId != lGrpId)
8343  continue;
8344 
8345  // adjust nid to be 0-indexed for each group
8346  // this way, if a group has 10 neurons, their IDs in the spike file and spike monitor will be
8347  // indexed from 0..9, no matter what their real nid is
8348  int nId = lNId - groupConfigs[netId][lGrpId].lStartN;
8349  assert(nId >= 0);
8350 
8351  int idxBase = networkConfigs[netId].numGroups * MAX_NEURON_MON_GRP_SZIE * t + lGrpId * MAX_NEURON_MON_GRP_SZIE;
8352  v = managerRuntimeData.nVBuffer[idxBase + nId];
8353  u = managerRuntimeData.nUBuffer[idxBase + nId];
8354  I = managerRuntimeData.nIBuffer[idxBase + nId];
8355 
8356  //printf("Voltage recorded is: %f\n", v);
8357 
8358  // current time is last completed second plus whatever is leftover in t
8359  int time = currentTimeSec * 1000 + t;
8360 
8361  //KERNEL_INFO("t: %d -- time: %d --base: %d -- nId: %d -- v: %f -- u: %f, --I: %f", t, time, idxBase + nId, nId, v, u, I);
8362 
8363  // WRITE TO A TEXT FILE INSTEAD OF BINARY
8364  if (writeNeuronStateToFile) {
8365  //KERNEL_INFO("Save to file");
8366  int cnt;
8367  cnt = fwrite(&time, sizeof(int), 1, nrnFileId); assert(cnt == 1);
8368  cnt = fwrite(&nId, sizeof(int), 1, nrnFileId); assert(cnt == 1);
8369  cnt = fwrite(&v, sizeof(float), 1, nrnFileId); assert(cnt == 1);
8370  cnt = fwrite(&u, sizeof(float), 1, nrnFileId); assert(cnt == 1);
8371  cnt = fwrite(&I, sizeof(float), 1, nrnFileId); assert(cnt == 1);
8372  }
8373 
8374  if (writeNeuronStateToArray) {
8375  //KERNEL_INFO("Save to array");
8376  nrnMonObj->pushNeuronState(nId, v, u, I);
8377  }
8378  }
8379  }
8380 
8381  if (nrnFileId != NULL) // flush neuron state file
8382  fflush(nrnFileId);
8383  }
8384 }
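// --- Editor's illustrative sketch (not part of snn_manager.cpp) ---
// Each neuron-state record written above is (int time, int neurId, float v, float u,
// float I), where the state is read from the per-millisecond buffer at
//   idxBase = numGroups * MAX_NEURON_MON_GRP_SZIE * t + lGrpId * MAX_NEURON_MON_GRP_SZIE,
// so at most MAX_NEURON_MON_GRP_SZIE neurons per group are recorded. A matching reader
// (hypothetical helper, header handling omitted) could be:
static bool readNeuronStateRecord(FILE* fp, int& timeMs, int& neurId, float& v, float& u, float& I) {
 return fread(&timeMs, sizeof(int), 1, fp) == 1 &&
  fread(&neurId, sizeof(int), 1, fp) == 1 &&
  fread(&v, sizeof(float), 1, fp) == 1 &&
  fread(&u, sizeof(float), 1, fp) == 1 &&
  fread(&I, sizeof(float), 1, fp) == 1;
}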
8385 
8386 // FIXME: update summary format for multiGPUs
8387 void SNN::printSimSummary() {
8388  float etime;
8389 
8390  // FIXME: measure total execution time, and GPU execution time
8391  stopTiming();
8392  etime = executionTime;
8393 
8394  fetchNetworkSpikeCount();
8395 
8396  KERNEL_INFO("\n");
8397  KERNEL_INFO("******************** Simulation Summary ***************************");
8398 
8399  KERNEL_INFO("Network Parameters: \tnumNeurons = %d (numNExcReg:numNInhReg = %2.1f:%2.1f)",
8400  glbNetworkConfig.numN, 100.0 * glbNetworkConfig.numNExcReg / glbNetworkConfig.numN, 100.0 * glbNetworkConfig.numNInhReg / glbNetworkConfig.numN);
8401  KERNEL_INFO("\t\t\tnumSynapses = %d", glbNetworkConfig.numSynNet);
8402  KERNEL_INFO("\t\t\tmaxDelay = %d", glbNetworkConfig.maxDelay);
8403  KERNEL_INFO("Simulation Mode:\t%s",sim_with_conductances?"COBA":"CUBA");
8404  KERNEL_INFO("Random Seed:\t\t%d", randSeed_);
8405  KERNEL_INFO("Timing:\t\t\tModel Simulation Time = %lld sec", (unsigned long long)simTimeSec);
8406  KERNEL_INFO("\t\t\tActual Execution Time = %4.2f sec", etime/1000.0f);
8407  float speed = float(simTimeSec) / std::max(.001f, etime / 1000.0f);
8408 #ifdef _DEBUG
8409  const char* build = "(Debug)";
8410 #else
8411  const char* build = "";
8412 #endif
8413  if (speed >= 10.f) {
8414  KERNEL_INFO("\t\t\tSpeed Factor (Model/Real) = %.0f x %s", speed, build);
8415  } else
8416  if (speed < 1.0f) {
8417  KERNEL_INFO("\t\t\tSpeed Factor (Model/Real) = %2.1f %% %s", speed*100.f, build);
8418  } else
8419  KERNEL_INFO("\t\t\tSpeed Factor (Model/Real) = %1.1f x %s", speed, build);
8420  KERNEL_INFO("Average Firing Rate:\t2+ms delay = %3.3f Hz",
8421  glbNetworkConfig.numN2msDelay > 0 ? managerRuntimeData.spikeCountD2 / (1.0 * simTimeSec * glbNetworkConfig.numN2msDelay) : 0.0f);
8422  KERNEL_INFO("\t\t\t1ms delay = %3.3f Hz",
8423  glbNetworkConfig.numN1msDelay > 0 ? managerRuntimeData.spikeCountD1 / (1.0 * simTimeSec * glbNetworkConfig.numN1msDelay) : 0.0f);
8424  KERNEL_INFO("\t\t\tOverall = %3.3f Hz", managerRuntimeData.spikeCount / (1.0 * simTimeSec * glbNetworkConfig.numN));
8425  KERNEL_INFO("Overall Spike Count Transferred:");
8426  KERNEL_INFO("\t\t\t2+ms delay = %d", managerRuntimeData.spikeCountExtRxD2);
8427  KERNEL_INFO("\t\t\t1ms delay = %d", managerRuntimeData.spikeCountExtRxD1);
8428  KERNEL_INFO("Overall Spike Count:\t2+ms delay = %d", managerRuntimeData.spikeCountD2);
8429  KERNEL_INFO("\t\t\t1ms delay = %d", managerRuntimeData.spikeCountD1);
8430  KERNEL_INFO("\t\t\tTotal = %d", managerRuntimeData.spikeCount);
8431  KERNEL_INFO("*********************************************************************************\n");
8432 }
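// --- Editor's illustrative sketch (not part of snn_manager.cpp) ---
// The speed factor reported above is model time over wall-clock time (assumes <algorithm>;
// speedFactor is a hypothetical helper):
static float speedFactor(float modelTimeSec, float execTimeMs) {
 return modelTimeSec / std::max(0.001f, execTimeMs / 1000.0f); // e.g. 10 s / 5 s = 2.0x
}
// Values below 1.0 are printed as a percentage of real time instead of a multiplier.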
8433 
8434 //------------------------------ legacy code --------------------------------//
8435 
8436 // We clean up the postSynapticIds array in parallel to minimize any other wastage in that array by compacting the store
8437 // Appropriate alignment specified by ALIGN_COMPACTION macro is used to ensure some level of alignment (if necessary)
8438 //void SNN::compactConnections() {
8439 // unsigned int* tmp_cumulativePost = new unsigned int[numN];
8440 // unsigned int* tmp_cumulativePre = new unsigned int[numN];
8441 // unsigned int lastCnt_pre = 0;
8442 // unsigned int lastCnt_post = 0;
8443 //
8444 // tmp_cumulativePost[0] = 0;
8445 // tmp_cumulativePre[0] = 0;
8446 //
8447 // for(int i=1; i < numN; i++) {
8448 // lastCnt_post = tmp_cumulativePost[i-1]+managerRuntimeData.Npost[i-1]; //position of last pointer
8449 // lastCnt_pre = tmp_cumulativePre[i-1]+managerRuntimeData.Npre[i-1]; //position of last pointer
8450 // #if COMPACTION_ALIGNMENT_POST
8451 // lastCnt_post= lastCnt_post + COMPACTION_ALIGNMENT_POST-lastCnt_post%COMPACTION_ALIGNMENT_POST;
8452 // lastCnt_pre = lastCnt_pre + COMPACTION_ALIGNMENT_PRE- lastCnt_pre%COMPACTION_ALIGNMENT_PRE;
8453 // #endif
8454 // tmp_cumulativePost[i] = lastCnt_post;
8455 // tmp_cumulativePre[i] = lastCnt_pre;
8456 // assert(tmp_cumulativePost[i] <= managerRuntimeData.cumulativePost[i]);
8457 // assert(tmp_cumulativePre[i] <= managerRuntimeData.cumulativePre[i]);
8458 // }
8459 //
8460 // // compress the post_synaptic array according to the new values of the tmp_cumulative counts....
8461 // unsigned int tmp_numPostSynNet = tmp_cumulativePost[numN-1]+managerRuntimeData.Npost[numN-1];
8462 // unsigned int tmp_numPreSynNet = tmp_cumulativePre[numN-1]+managerRuntimeData.Npre[numN-1];
8463 // assert(tmp_numPostSynNet <= allocatedPost);
8464 // assert(tmp_numPreSynNet <= allocatedPre);
8465 // assert(tmp_numPostSynNet <= numPostSynNet);
8466 // assert(tmp_numPreSynNet <= numPreSynNet);
8467 // KERNEL_DEBUG("******************");
8468 // KERNEL_DEBUG("CompactConnection: ");
8469 // KERNEL_DEBUG("******************");
8470 // KERNEL_DEBUG("old_postCnt = %d, new_postCnt = %d", numPostSynNet, tmp_numPostSynNet);
8471 // KERNEL_DEBUG("old_preCnt = %d, new_postCnt = %d", numPreSynNet, tmp_numPreSynNet);
8472 //
8473 // // new buffer with required size + 100 bytes of additional space just to provide limited overflow
8474 // SynInfo* tmp_postSynapticIds = new SynInfo[tmp_numPostSynNet+100];
8475 //
8476 // // new buffer with required size + 100 bytes of additional space just to provide limited overflow
8477 // SynInfo* tmp_preSynapticIds = new SynInfo[tmp_numPreSynNet+100];
8478 // float* tmp_wt = new float[tmp_numPreSynNet+100];
8479 // float* tmp_maxSynWt = new float[tmp_numPreSynNet+100];
8480 // short int *tmp_cumConnIdPre = new short int[tmp_numPreSynNet+100];
8481 // float *tmp_mulSynFast = new float[numConnections];
8482 // float *tmp_mulSynSlow = new float[numConnections];
8483 //
8484 // // compact synaptic information
8485 // for(int i=0; i<numN; i++) {
8486 // assert(tmp_cumulativePost[i] <= managerRuntimeData.cumulativePost[i]);
8487 // assert(tmp_cumulativePre[i] <= managerRuntimeData.cumulativePre[i]);
8488 // for( int j=0; j<managerRuntimeData.Npost[i]; j++) {
8489 // unsigned int tmpPos = tmp_cumulativePost[i]+j;
8490 // unsigned int oldPos = managerRuntimeData.cumulativePost[i]+j;
8491 // tmp_postSynapticIds[tmpPos] = managerRuntimeData.postSynapticIds[oldPos];
8492 // tmp_SynapticDelay[tmpPos] = tmp_SynapticDelay[oldPos];
8493 // }
8494 // for( int j=0; j<managerRuntimeData.Npre[i]; j++) {
8495 // unsigned int tmpPos = tmp_cumulativePre[i]+j;
8496 // unsigned int oldPos = managerRuntimeData.cumulativePre[i]+j;
8497 // tmp_preSynapticIds[tmpPos] = managerRuntimeData.preSynapticIds[oldPos];
8498 // tmp_maxSynWt[tmpPos] = managerRuntimeData.maxSynWt[oldPos];
8499 // tmp_wt[tmpPos] = managerRuntimeData.wt[oldPos];
8500 // tmp_cumConnIdPre[tmpPos] = managerRuntimeData.connIdsPreIdx[oldPos];
8501 // }
8502 // }
8503 //
8504 // // delete old buffer space
8505 // delete[] managerRuntimeData.postSynapticIds;
8506 // managerRuntimeData.postSynapticIds = tmp_postSynapticIds;
8507 // cpuSnnSz.networkInfoSize -= (sizeof(SynInfo)*numPostSynNet);
8508 // cpuSnnSz.networkInfoSize += (sizeof(SynInfo)*(tmp_numPostSynNet+100));
8509 //
8510 // delete[] managerRuntimeData.cumulativePost;
8511 // managerRuntimeData.cumulativePost = tmp_cumulativePost;
8512 //
8513 // delete[] managerRuntimeData.cumulativePre;
8514 // managerRuntimeData.cumulativePre = tmp_cumulativePre;
8515 //
8516 // delete[] managerRuntimeData.maxSynWt;
8517 // managerRuntimeData.maxSynWt = tmp_maxSynWt;
8518 // cpuSnnSz.synapticInfoSize -= (sizeof(float)*numPreSynNet);
8519 // cpuSnnSz.synapticInfoSize += (sizeof(float)*(tmp_numPreSynNet+100));
8520 //
8521 // delete[] managerRuntimeData.wt;
8522 // managerRuntimeData.wt = tmp_wt;
8523 // cpuSnnSz.synapticInfoSize -= (sizeof(float)*numPreSynNet);
8524 // cpuSnnSz.synapticInfoSize += (sizeof(float)*(tmp_numPreSynNet+100));
8525 //
8526 // delete[] managerRuntimeData.connIdsPreIdx;
8527 // managerRuntimeData.connIdsPreIdx = tmp_cumConnIdPre;
8528 // cpuSnnSz.synapticInfoSize -= (sizeof(short int)*numPreSynNet);
8529 // cpuSnnSz.synapticInfoSize += (sizeof(short int)*(tmp_numPreSynNet+100));
8530 //
8531 // // compact connection-centric information
8532 // for (int i=0; i<numConnections; i++) {
8533 // tmp_mulSynFast[i] = mulSynFast[i];
8534 // tmp_mulSynSlow[i] = mulSynSlow[i];
8535 // }
8536 // delete[] mulSynFast;
8537 // delete[] mulSynSlow;
8538 // mulSynFast = tmp_mulSynFast;
8539 // mulSynSlow = tmp_mulSynSlow;
8540 // cpuSnnSz.networkInfoSize -= (2*sizeof(uint8_t)*numPreSynNet);
8541 // cpuSnnSz.networkInfoSize += (2*sizeof(uint8_t)*(tmp_numPreSynNet+100));
8542 //
8543 //
8544 // delete[] managerRuntimeData.preSynapticIds;
8545 // managerRuntimeData.preSynapticIds = tmp_preSynapticIds;
8546 // cpuSnnSz.synapticInfoSize -= (sizeof(SynInfo)*numPreSynNet);
8547 // cpuSnnSz.synapticInfoSize += (sizeof(SynInfo)*(tmp_numPreSynNet+100));
8548 //
8549 // numPreSynNet = tmp_numPreSynNet;
8550 // numPostSynNet = tmp_numPostSynNet;
8551 //}
8552 
8553 //The post-synaptic connections are sorted based on delay here so that we can reduce the storage requirement
8554 //and the generation of spikes at the post-synaptic side.
8555 //We also create the delay_info array, which has the delay_start and delay_length parameters
8556 //void SNN::reorganizeDelay()
8557 //{
8558 // for(int grpId=0; grpId < numGroups; grpId++) {
8559 // for(int nid=groupConfigs[0][grpId].StartN; nid <= groupConfigs[0][grpId].EndN; nid++) {
8560 // unsigned int jPos=0; // this points to the top of the delay queue
8561 // unsigned int cumN=managerRuntimeData.cumulativePost[nid]; // cumulativePost[] is unsigned int
8562 // unsigned int cumDelayStart=0; // Npost[] is unsigned short
8563 // for(int td = 0; td < maxDelay_; td++) {
8564 // unsigned int j=jPos; // start searching from top of the queue until the end
8565 // unsigned int cnt=0; // store the number of nodes with a delay of td;
8566 // while(j < managerRuntimeData.Npost[nid]) {
8567 // // found a node j with delay=td and we put
8568 // // the delay value = 1 at array location td=0;
8569 // if(td==(tmp_SynapticDelay[cumN+j]-1)) {
8570 // assert(jPos<managerRuntimeData.Npost[nid]);
8571 // swapConnections(nid, j, jPos);
8572 //
8573 // jPos=jPos+1;
8574 // cnt=cnt+1;
8575 // }
8576 // j=j+1;
8577 // }
8578 //
8579 // // update the delay_length and start values...
8580 // managerRuntimeData.postDelayInfo[nid*(maxDelay_+1)+td].delay_length = cnt;
8581 // managerRuntimeData.postDelayInfo[nid*(maxDelay_+1)+td].delay_index_start = cumDelayStart;
8582 // cumDelayStart += cnt;
8583 //
8584 // assert(cumDelayStart <= managerRuntimeData.Npost[nid]);
8585 // }
8586 //
8587 // // total cumulative delay should be equal to number of post-synaptic connections at the end of the loop
8588 // assert(cumDelayStart == managerRuntimeData.Npost[nid]);
8589 // for(unsigned int j=1; j < managerRuntimeData.Npost[nid]; j++) {
8590 // unsigned int cumN=managerRuntimeData.cumulativePost[nid]; // cumulativePost[] is unsigned int
8591 // if( tmp_SynapticDelay[cumN+j] < tmp_SynapticDelay[cumN+j-1]) {
8592 // KERNEL_ERROR("Post-synaptic delays not sorted correctly... id=%d, delay[%d]=%d, delay[%d]=%d",
8593 // nid, j, tmp_SynapticDelay[cumN+j], j-1, tmp_SynapticDelay[cumN+j-1]);
8594 // assert( tmp_SynapticDelay[cumN+j] >= tmp_SynapticDelay[cumN+j-1]);
8595 // }
8596 // }
8597 // }
8598 // }
8599 //}
8600 
8601 //void SNN::swapConnections(int nid, int oldPos, int newPos) {
8602 // unsigned int cumN=managerRuntimeData.cumulativePost[nid];
8603 //
8604 // // Put the node oldPos to the top of the delay queue
8605 // SynInfo tmp = managerRuntimeData.postSynapticIds[cumN+oldPos];
8606 // managerRuntimeData.postSynapticIds[cumN+oldPos]= managerRuntimeData.postSynapticIds[cumN+newPos];
8607 // managerRuntimeData.postSynapticIds[cumN+newPos]= tmp;
8608 //
8609 // // Ensure that you have shifted the delay accordingly....
8610 // uint8_t tmp_delay = tmp_SynapticDelay[cumN+oldPos];
8611 // tmp_SynapticDelay[cumN+oldPos] = tmp_SynapticDelay[cumN+newPos];
8612 // tmp_SynapticDelay[cumN+newPos] = tmp_delay;
8613 //
8614 // // update the pre-information for the postsynaptic neuron at the position oldPos.
8615 // SynInfo postInfo = managerRuntimeData.postSynapticIds[cumN+oldPos];
8616 // int post_nid = GET_CONN_NEURON_ID(postInfo);
8617 // int post_sid = GET_CONN_SYN_ID(postInfo);
8618 //
8619 // SynInfo* preId = &(managerRuntimeData.preSynapticIds[managerRuntimeData.cumulativePre[post_nid]+post_sid]);
8620 // int pre_nid = GET_CONN_NEURON_ID((*preId));
8621 // int pre_sid = GET_CONN_SYN_ID((*preId));
8622 // int pre_gid = GET_CONN_GRP_ID((*preId));
8623 // assert (pre_nid == nid);
8624 // assert (pre_sid == newPos);
8625 // *preId = SET_CONN_ID( pre_nid, oldPos, pre_gid);
8626 //
8627 // // update the pre-information for the postsynaptic neuron at the position newPos
8628 // postInfo = managerRuntimeData.postSynapticIds[cumN+newPos];
8629 // post_nid = GET_CONN_NEURON_ID(postInfo);
8630 // post_sid = GET_CONN_SYN_ID(postInfo);
8631 //
8632 // preId = &(managerRuntimeData.preSynapticIds[managerRuntimeData.cumulativePre[post_nid]+post_sid]);
8633 // pre_nid = GET_CONN_NEURON_ID((*preId));
8634 // pre_sid = GET_CONN_SYN_ID((*preId));
8635 // pre_gid = GET_CONN_GRP_ID((*preId));
8636 // assert (pre_nid == nid);
8637 // assert (pre_sid == oldPos);
8638 // *preId = SET_CONN_ID( pre_nid, newPos, pre_gid);
8639 //}
8640 
8641 // set one specific connection from neuron id 'src' to neuron id 'dest'
8642 //inline void SNN::setConnection(int srcGrp, int destGrp, unsigned int src, unsigned int dest, float synWt,
8643 // float maxWt, uint8_t dVal, int connProp, short int connId) {
8644 // assert(dest<=CONN_SYN_NEURON_MASK); // total number of neurons is less than 1 million within a GPU
8645 // assert((dVal >=1) && (dVal <= maxDelay_));
8646 //
8647 // // adjust sign of weight based on pre-group (negative if pre is inhibitory)
8648 // synWt = isExcitatoryGroup(srcGrp) ? fabs(synWt) : -1.0*fabs(synWt);
8649 // maxWt = isExcitatoryGroup(srcGrp) ? fabs(maxWt) : -1.0*fabs(maxWt);
8650 //
8651 // // we have exceeded the number of possible connection for one neuron
8652 // if(managerRuntimeData.Npost[src] >= groupConfigs[0][srcGrp].numPostSynapses) {
8653 // KERNEL_ERROR("setConnection(%d (Grp=%s), %d (Grp=%s), %f, %d)", src, groupInfo[srcGrp].Name.c_str(),
8654 // dest, groupInfo[destGrp].Name.c_str(), synWt, dVal);
8655 // KERNEL_ERROR("Large number of postsynaptic connections established (%d), max for this group %d.", managerRuntimeData.Npost[src], groupConfigs[0][srcGrp].numPostSynapses);
8656 // exitSimulation(1);
8657 // }
8658 //
8659 // if(managerRuntimeData.Npre[dest] >= groupConfigs[0][destGrp].numPreSynapses) {
8660 // KERNEL_ERROR("setConnection(%d (Grp=%s), %d (Grp=%s), %f, %d)", src, groupInfo[srcGrp].Name.c_str(),
8661 // dest, groupInfo[destGrp].Name.c_str(), synWt, dVal);
8662 // KERNEL_ERROR("Large number of presynaptic connections established (%d), max for this group %d.", managerRuntimeData.Npre[dest], groupConfigs[0][destGrp].numPreSynapses);
8663 // exitSimulation(1);
8664 // }
8665 //
8666 // int p = managerRuntimeData.Npost[src];
8667 //
8668 // assert(managerRuntimeData.Npost[src] >= 0);
8669 // assert(managerRuntimeData.Npre[dest] >= 0);
8670 // assert((src * maxNumPostSynGrp + p) / numN < maxNumPostSynGrp); // divide by numN to prevent INT overflow
8671 //
8672 // unsigned int post_pos = managerRuntimeData.cumulativePost[src] + managerRuntimeData.Npost[src];
8673 // unsigned int pre_pos = managerRuntimeData.cumulativePre[dest] + managerRuntimeData.Npre[dest];
8674 //
8675 // assert(post_pos < numPostSynNet);
8676 // assert(pre_pos < numPreSynNet);
8677 //
8678 // //generate a new postSynapticIds id for the current connection
8679 // managerRuntimeData.postSynapticIds[post_pos] = SET_CONN_ID(dest, managerRuntimeData.Npre[dest], destGrp);
8680 // tmp_SynapticDelay[post_pos] = dVal;
8681 //
8682 // managerRuntimeData.preSynapticIds[pre_pos] = SET_CONN_ID(src, managerRuntimeData.Npost[src], srcGrp);
8683 // managerRuntimeData.wt[pre_pos] = synWt;
8684 // managerRuntimeData.maxSynWt[pre_pos] = maxWt;
8685 // managerRuntimeData.connIdsPreIdx[pre_pos] = connId;
8686 //
8687 // bool synWtType = GET_FIXED_PLASTIC(connProp);
8688 //
8689 // if (synWtType == SYN_PLASTIC) {
8690 // sim_with_fixedwts = false; // if network has any plastic synapses at all, this will be set to true
8691 // managerRuntimeData.Npre_plastic[dest]++;
8692 // // homeostasis
8693 // if (groupConfigs[0][destGrp].WithHomeostasis && groupConfigs[0][destGrp].homeoId ==-1)
8694 // groupConfigs[0][destGrp].homeoId = dest; // this neuron info will be printed
8695 // }
8696 //
8697 // managerRuntimeData.Npre[dest] += 1;
8698 // managerRuntimeData.Npost[src] += 1;
8699 //
8700 // groupInfo[srcGrp].numPostConn++;
8701 // groupInfo[destGrp].numPreConn++;
8702 //
8703 // if (managerRuntimeData.Npost[src] > groupInfo[srcGrp].maxPostConn)
8704 // groupInfo[srcGrp].maxPostConn = managerRuntimeData.Npost[src];
8705 // if (managerRuntimeData.Npre[dest] > groupInfo[destGrp].maxPreConn)
8706 // groupInfo[destGrp].maxPreConn = managerRuntimeData.Npre[src];
8707 //}
#define KERNEL_ERROR_POISSON_2
Definition: error_code.h:194
float * maxSynWt
maximum synaptic weight for a connection
float * voltage
membrane potential for each regular neuron
Class for generating Poisson spike trains.
Definition: poisson_rate.h:88
#define KERNEL_ERROR_SRC_GRP_CONN
Definition: error_code.h:164
bool WithDA_MOD
True if at least one connection group is WithDA_MOD.
float rGABAb
multiplication factor for rise time of GABAb
float * randNum
firing random number. max value is 10,000
float ALPHA_MINUS_EXC
published by GroupConfig
double z
float mulSynFast
factor to be applied to either gAMPA or gGABAa
int numNSpikeGen
number of poisson neurons generating spikes based on callback functions
FILE * getSpikeFileId()
returns a pointer to the spike file
int * synSpikeTime
stores the last spike time of a synapse
#define SET_CONN_PRESENT(a)
x86/x64 Multi Core Processor (LN20201016)
void setNeuronParametersLIF(int grpId, int tau_m, int tau_ref, float vTh, float vReset, double minRmem, double maxRmem)
Sets neuron parameters for a group of LIF spiking neurons.
float decayNE
decay rate for Noradrenaline, published by GroupConfig
#define KERNEL_ERROR_UNKNOWN_ICALC
Definition: error_code.h:176
int NM_PLC
published by GroupConfig
bool isPoissonGroup(int gGrpId)
Definition: snn.h:709
virtual int nextSpikeTime(SNN *s, int grpId, int i, int currentTime, int lastScheduledSpikeTime, int endOfTimeSlice)
controls spike generation using a callback mechanism
int grpId
corresponding global group Id
Definition: spike_buffer.h:93
float TAU_MINUS_INV_EXC
the inverse of time constant minus, if the exponential or timing-based E-STDP curve is used ...
Developer mode, for developing and debugging code.
STDPCurve WithISTDPcurve
published by GroupConfig
bool isSimulationWithGABAbRise()
Definition: snn.h:766
float wstptaux[NM_NE+3]
Array size = last index + 1 + additional elementsnorm + base.
float KAPPA
published by GroupConfig
#define KERNEL_ERROR_INVALID_START
Definition: error_code.h:143
STDPType WithISTDPtype
the type of I-STDP (STANDARD or DA_MOD)
#define KERNEL_ERROR_CONN_MISSING3
Definition: error_code.h:203
void schedule(int neurId, int grpId, unsigned short int delay)
Schedule a spike.
int createGroupLIF(const std::string &grpName, const Grid3D &grid, int neurType, int preferredPartition, ComputingBackend preferredBackend)
Creates a group of LIF spiking neurons.
RangeDelay getDelayRange(short int connId)
returns the RangeDelay struct of a connection
int numNAssigned
number of total neurons assigned to the local network
float base5HT
baseline concentration of Serotonin
bool WithSTDP
True if at least one connection group is WithSTDP.
void findWavefrontPath(std::vector< int > &path, std::vector< float > &eligibility, int netId, int grpId, int startNId, int goalNId)
#define UNKNOWN_LOGGER_ERROR
Definition: error_code.h:95
Iterator to loop over the scheduled spikes at a certain delay.
Definition: spike_buffer.h:99
A struct for retrieving STDP related information of a connection.
bool isRecording()
returns recording status
int numNPois
number of poisson neurons
float LAMBDA
published by GroupConfig
void pushNeuronState(int neurId, float V, float U, float I)
inserts a (time, neurId) tuple into the neuron state vector
double x
#define KERNEL_ERROR_NUMX
Definition: error_code.h:149
NeuronMonitor * getNeuronMonitor(int grpId)
Returns a pointer to the existing NeuronMonitor object, or NULL otherwise.
int numPreSynNet
the total number of pre-connections in a network
float releaseDP
release per spike for Dopamine
int GtoLOffset
published by GroupConfigMD
#define KERNEL_ERROR_INVALID_CONN2
Definition: error_code.h:113
void updateGroupMonitor(int grpId=ALL)
access group status (currently the concentration of neuromodulator)
float W_PLC
published by GroupConfig
float GAMMA
the turn over point if the timing-based E-STDP curve is used
#define KERNEL_ERROR_COMPARTMENT_DISABLED2
Definition: error_code.h:233
float timeStep
inverse of simNumStepsPerMs
#define ALL
CARLsim common definitions.
float homeostasisScale
published by GroupConfig
float dAMPA
multiplication factor for decay time of AMPA conductance (gAMPA[i] *= dAMPA)
FILE * getNeuronFileId()
returns a pointer to the neuron state file
unsigned int * timeTableD1
firing table, only used in CPU_MODE currently
int * nSpikeCnt
homeostatic plasticity variables
#define KERNEL_ERROR_INVALID_GROUP_INFO
Definition: error_code.h:161
int numNRateGen
number of poisson neurons generating spikes based on firing rate
#define KERNEL_ERROR_INVALID_HEADER
Definition: error_code.h:140
#define KERNEL_ERROR_INVALID_CONN
Definition: error_code.h:107
int getSimTimeSec()
Definition: snn.h:668
void setNM4weighted(int grpId, IcalcType type, float wDA, float w5HT, float wACh, float wNE, float wNorm, float wBase)
unsigned int spikeCountD1Sec
the total number of spikes with axonal delay == 1 in 1 second, used in CPU_MODE currently ...
model is run on GPU card(s)
GroupNeuromodulatorInfo getGroupNeuromodulatorInfo(int grpId)
#define KERNEL_ERROR_GROUP_MISMATCH
Definition: error_code.h:137
unsigned int spikeCountD1
the total number of spikes with axonal delay == 1 in a simulation, used in CPU_MODE currently ...
int getNumGroups()
Definition: snn.h:655
void setISTDP(int preGrpId, int postGrpId, bool isSet, STDPType type, STDPCurve curve, float ab1, float ab2, float tau1, float tau2)
Set the inhibitory spike-timing-dependent plasticity (STDP) with anti-hebbian curve for a neuron grou...
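As a hedged illustration of the signature above, the call below enables pulse-curve I-STDP from a hypothetical inhibitory group gInh onto an excitatory group gExc; the enumerator PULSE_CURVE and the reading of ab1/ab2/tau1/tau2 as betaLTP/betaLTD/lambda/delta follow the pulse-curve parameters documented elsewhere in this list, and the numeric values are made up for the example:
  // pulse-curve I-STDP: betaLTP = 1.8, betaLTD = 1.5, lambda = 5 ms, delta = 10 ms (illustrative)
  snn.setISTDP(gInh, gExc, true, STANDARD, PULSE_CURVE, 1.8f, 1.5f, 5.0f, 10.0f);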
int numN2msDelay
number of neurons with maximum outgoing axonal delay >= 2 ms
#define TIMING_COUNT
float DELTA
published by GroupConfig
unsigned int spikeCountExtRxD2
the number of external spikes with axonal delay > 1 in a simulation, used in CPU_MODE currently ...
ConnectionMonitor * setConnectionMonitor(int grpIdPre, int grpIdPost, FILE *fid)
sets up a network monitor registered with a callback to process the spikes.
bool activeACh
flag for Acetylcholine
bool compareDelay(const ConnectionInfo &first, const ConnectionInfo &second)
#define KERNEL_ERROR_NO_CATAGORY
Definition: error_code.h:119
#define KERNEL_ERROR_STDP_HOMEO_INCONSIST
Definition: error_code.h:239
UpdateInterval
Update frequency for weights.
float ALPHA_MINUS_INB
the amplitude of alpha minus, if the exponential I-STDP curve is used
float GAMMA
published by GroupConfig
#define GROUP_ID_MASK
bool isRecording()
returns recording status
bool allocated
true if all data has been allocated
int getLastUpdated()
returns timestamp of last GroupMonitor update
float decayDP
decay rate for Dopamine, published by GroupConfig
bool isBufferBig()
returns true if spike buffer is close to maxAllowedBufferSize
runtime data is allocated on CPU (main) memory
float TAU_PLUS_INV_INB
published by GroupConfig
float * gAMPA
conductance of gAMPA
bool WithISTDP
enable I-STDP flag
#define KERNEL_WARN(formatc,...)
int numSynNet
number of total synaptic connections in the global network
void setNeuromodulator(int grpId, float baseDP, float tauDP, float base5HT, float tau5HT, float baseACh, float tauACh, float baseNE, float tauNE)
Sets baseline concentration and decay time constant of neuromodulators (DP, 5HT, ACh, NE) for a neuron group.
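A sketch of how the baseline/decay pairs might be passed for the four neuromodulators; the group ID and all concentrations and time constants are illustrative assumptions:
  // baseline concentration and decay time constant per neuromodulator (illustrative values)
  snn.setNeuromodulator(gExc,
                        1.0f, 100.0f,   // dopamine (DP)
                        1.0f, 100.0f,   // serotonin (5HT)
                        1.0f, 100.0f,   // acetylcholine (ACh)
                        1.0f, 100.0f);  // noradrenaline (NE)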
#define MAX_NEURON_MON_GRP_SZIE
#define NEURON_MAX_FIRING_RATE
SpikeMonMode getMode()
returns recording mode
std::vector< float > getConductanceNMDA(int grpId)
float DELTA
the range of inhibitory LTD if the pulse I-STDP curve is used
int * lif_tau_m
parameters for a LIF spiking group
unsigned int * cumulativePre
void setNM4STP(int grpId, float wSTP_U[], float wSTP_tau_u[], float wSTP_tau_x[])
int maxDelay
maximum axonal delay in the global network
bool updateDelays(int gGrpIdPre, int gGrpIdPost, std::vector< std::tuple< int, int, uint8_t >> connDelays)
void startTesting(bool shallUpdateWeights=true)
enters a testing phase, where all weight updates are disabled
bool activeNE
flag for Noradrenaline
NeuronMonitorCore * getNeuronMonitorCore(int grpId)
bool isLIF
True = a LIF spiking group.
static const char * IcalcType_string[]
unsigned * firingTimesD2
stores the actual firing time
bool isGroupWithHomeostasis(int grpId)
returns whether group has homeostasis enabled (true) or not (false)
unsigned int spikeCountD2
the total number of spikes with axonal delay > 1 in a simulation, used in CPU_MODE currently ...
#define MAX_NUM_COMP_CONN
int numN
number of neurons in the local network
int gEndN
published by GroupConfigMD
void exitSimulation(int val=1)
deallocates all dynamical structures and exits
#define CPU_RUNTIME_BASE
short int connId
connectID of the element in the linked list
#define SYN_FIXED
float wstptauu[NM_NE+3]
Array size = last index + 1 + additional elements (norm + base).
void setLastUpdated(long int lastUpdate)
sets timestamp of last SpikeMonitor update
#define KERNEL_ERROR_NO_PARTION_ASSIGNED
Definition: error_code.h:125
float sNMDA
scaling factor for NMDA amplitude
#define MAX_GRP_PER_SNN
#define KERNEL_ERROR(formatc,...)
a point in 3D space
std::vector< float > getConductanceGABAb(int grpId)
float decayNE
decay rate for Noradrenaline
integrationMethod_t simIntegrationMethod
integration method (forward-Euler or Fourth-order Runge-Kutta)
float ALPHA_PLUS_EXC
published by GroupConfig
STDPType
STDP flavors.
void scaleWeights(short int connId, float scale, bool updateWeightRange=false)
void setGroupFileId(FILE *groupFileId)
sets pointer to group data file
float ALPHA_MINUS_INB
published by GroupConfig
#define KERNEL_ERROR_POISSON_1
Definition: error_code.h:191
int numN
published by GroupConfig
float decay5HT
decay rate for Serotonin, published by GroupConfig
float * wt
stores the weight of a synaptic connection
Class SpikeMonitor.
void printEntrails(char *buffer, unsigned length, int gGrpIdPre, int gGrpIdPost)
void pushData(int time, float data)
inserts group data (time, value) into the vectors, if the monitor is in DA_MODE
long int getLastUpdated()
returns timestamp of last SpikeMonitor update
#define NUM_CPU_CORES
LoggerMode
Logger modes.
int createGroup(const std::string &grpName, const Grid3D &grid, int neurType, int preferredPartition, ComputingBackend preferredBackend)
Creates a group of Izhikevich spiking neurons.
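A minimal usage sketch of the documented signature; the group name, grid, the neuron-type flag EXCITATORY_NEURON, and the backend enumerator GPU_CORES are assumptions made for illustration:
  // hypothetical 10x10x1 excitatory Izhikevich group, any partition, GPU backend
  int gExc = snn.createGroup("exc", Grid3D(10, 10, 1), EXCITATORY_NEURON, ANY, GPU_CORES);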
float BETA_LTP
the amplitude of inhibitory LTP if the pulse I-STDP curve is used
float decayDP
decay rate for Dopamine
used for relaying callback to ConnectionGenerator
Definition: callback_core.h:91
bool active5HT
flag for Serotonin
float baseNE
baseline concentration of Noradrenaline, published by GroupConfig
int numPreSynapses
the total number of pre-connections of a group, published by GroupConfigMD
void updateNeuronMonitor(int grpId=ALL)
copy required neuron state values from ??? buffer to ??? buffer
bool isExcitatoryGroup(int gGrpId)
Definition: snn.h:707
GroupMonitor * setGroupMonitor(int grpId, FILE *fid, int mode=0)
sets up a group monitor registered with a callback to process the spikes.
int netId
published by GroupConfigMD
The configuration of a connection.
int NM_PKA
published by GroupConfig
int ** extFiringTableD1
external firing table, only used on GPU
bool WithESTDP
enable E-STDP flag
void setLogsFp(FILE *fpInf=NULL, FILE *fpErr=NULL, FILE *fpDeb=NULL, FILE *fpLog=NULL)
Sets the file pointers for all log files; a NULL file pointer means don't change it. ...
float dNMDA
multiplication factor for decay time of NMDA
#define KERNEL_ERROR_UNKNOWN_SIM_MODE
Definition: error_code.h:122
bool isSpikeGenerator
published by GroupConfig
long int getLastUpdated()
returns timestamp of last NeuronMonitor update
short int getConnectId(int grpIdPre, int grpIdPost)
find connection ID based on pre-post group pair, O(N)
#define LARGE_SPIKE_MON_GRP_SIZE
float releaseDP
release per spike for Dopamine
void setConnectFileId(FILE *connFileId)
sets pointer to connection file
bool active5HT
flag for Serotonin
unsigned short * Npre_plastic
stores the number of plastic input connections to a neuron
int gGrpId
published by GroupConfigMD
#define KERNEL_ERROR_NEURON_MISMATCH
Definition: error_code.h:134
Circular buffer for delivering spikes.
Definition: spike_buffer.h:67
SimMode
simulation mode
Grid3D getGroupGrid3D(int grpId)
bool hasExternalConnect
published by GroupConfigMD
void setupNetwork()
build the network
int numPostSynapses
the total number of post-connections of a group, published by GroupConfigMD
unsigned int spikeCountLastSecLeftD2
the number of spikes left over from the last second, used in CPU_MODE currently
STDPCurve
STDP curves.
#define KERNEL_INFO(formatc,...)
A struct to arrange neurons on a 3D grid (a primitive cubic Bravais lattice with cubic side length 1)...
static const unsigned int MAJOR_VERSION
major release version, as in CARLsim X
Definition: snn.h:161
int simNumStepsPerMs
number of steps per 1 millisecond
bool WithSTDP
published by GroupConfig
bool isSimulationWithCOBA()
Definition: snn.h:763
void saveSimulation(FILE *fid, bool saveSynapseInfo=false)
stores the pre- and post-synaptic neuron IDs along with the weight and delay
float dGABAb
multiplication factor for decay time of GABAb
bool isSpikeGenFunc
published by GroupConfig
#define GET_FIXED_PLASTIC(a)
float BETA_LTP
published by GroupConfig
void setLastUpdated(unsigned int lastUpdate)
sets timestamp of last GroupMonitor update
float baseDP
baseline concentration of Dopamine, published by GroupConfig
void setSpikeFileId(FILE *spikeFileId)
sets pointer to spike file
int lGrpId
published by GroupConfigMD
void updateConnectionMonitor(short int connId=ALL)
polls connection weights
used for relaying callback to SpikeGenerator
Definition: callback_core.h:71
int numN1msDelay
number of neurons with maximum outgoing axonal delay = 1 ms
float decayACh
decay rate for Acetylcholine, published by GroupConfig
float rNMDA
multiplication factor for rise time of NMDA
SpikeMonitor * getSpikeMonitor(int grpId)
Returns a pointer to the existing SpikeMonitor object, or NULL otherwise.
#define KERNEL_ERROR_CONN_MISSING4
Definition: error_code.h:209
NeuronMonitor * setNeuronMonitor(int gid, FILE *fid)
sets up a neuron monitor registered with a callback to process the neuron state values, there can only be one NeuronMonitor per group
std::string getGroupName(int grpId)
int maxDelay
maximum axonal delay in the global network
void updateSpikeMonitor(int grpId=ALL)
copy required spikes from firing buffer to spike buffer
float release5HT
release per spike for Serotonin
short int * connIdsPreIdx
connectId, per synapse, presynaptic cumulative indexing
unsigned int spikeCount
the total number of spikes in a simulation, used in CPU_MODE currently
#define KERNEL_ERROR_LIF_PARAMS_NOT_SET
Definition: error_code.h:173
#define KERNEL_ERROR_CONNMON_SET
Definition: error_code.h:212
protein kinase/phospholipase controlled LTP/LTD adopted from Nadim & Bucher (2014) ...
float mulSynSlow
factor to be applied to either gNMDA or gGABAb
#define GPU_RUNTIME_BASE
bool WithNM4STP
if group has targeted STP
mode in which spike information is collected in AER format
float decay5HT
decay rate for Serotonin
int numNPois
number of poisson neurons in the global network
bool activeDP
flag for Dopamine
dopamine-modulated STDP, nearest-neighbor
long int getAccumTime()
returns the total accumulated time
The configuration of a compartmental connection.
#define TARGET_GABAb
float OMEGA
published by GroupConfig
#define KERNEL_ERROR_INVALID_CONN3
Definition: error_code.h:116
#define KERNEL_ERROR_CONN_MISSING5
Definition: error_code.h:215
bool isSimulationWithNMDARise()
Definition: snn.h:765
#define KERNEL_ERROR_COMPARTMENT_DISABLED
Definition: error_code.h:230
bool with_GABAb_rise
replaces sim_with_GABAb_rise
unsigned short * Npost
stores the number of output connections from a neuron.
#define TARGET_GABAa
int getNumNeurons()
Returns the number of neurons for which to generate Poisson spike trains.
SpikeMonitorCore * getSpikeMonitorCore(int grpId)
#define NO_LOGGER_DIR_ERROR
Definition: error_code.h:96
int numNExcPois
number of excitatory poisson neurons in the global network
#define KERNEL_ERROR_GROUPMON_SET
Definition: error_code.h:206
int8_t MaxDelay
published by GroupConfigMD
bool compareSrcNeuron(const ConnectionInfo &first, const ConnectionInfo &second)
bool withParamModel_9
False = 4-parameter model; True = 9-parameter model.
float avgTimeScaleInv
published by GroupConfig
float decayACh
decay rate for Acetylcholine
bool WithSTDP
published by GroupConfig
double getRFDist3D(const RadiusRF &radius, const Point3D &pre, const Point3D &post)
checks whether a point pre lies in the receptive field for point post
#define KERNEL_ERROR_COMP_CONNS_EXCEEDED2
Definition: error_code.h:221
User mode, for experiment-oriented simulations.
uint8_t * getDelays(int gGrpIdPre, int gGrpIdPost, int &numPreN, int &numPostN)
Returns the delay information for all synaptic connections between a pre-synaptic and a post-synaptic...
unsigned int Type
published by GroupConfig
#define NUM_SYNAPSE_BITS
#define KERNEL_ERROR_MAX_SYN_DELAY
Definition: error_code.h:227
bool WithHomeostasis
published by GroupConfig
void setSpikeRate(int grpId, PoissonRate *spikeRate, int refPeriod)
Sets the Poisson spike rate for a group. For information on how to set up spikeRate, see Section Poisson spike generators in the Tutorial.
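A hedged sketch of driving a hypothetical Poisson group gIn at a mean rate; it assumes a PoissonRate object (the class documented above) sized to the group and a setRates() call that assigns one rate to all neurons, with nPois and the 20 Hz value chosen only for illustration:
  PoissonRate rate(nPois);          // one rate entry per neuron in gIn (assumption)
  rate.setRates(20.0f);             // mean 20 Hz for every neuron (assumption)
  snn.setSpikeRate(gIn, &rate, 1);  // refPeriod = 1 (illustrative)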
#define SYN_PLASTIC
float base5HT
baseline concentration of Serotonin, published by GroupConfig
IcalcType icalcType
published by GroupConfig
float connProbability
connection probability
conductance
Class GroupMonitor.
IcalcType
input current calculation
int * lastSpikeTime
stores the last spike time of a neuron
float * gGABAa
conductance of gGABAa
SynInfo * preSynapticIds
void biasWeights(short int connId, float bias, bool updateWeightRange=false)
void writeConnectFileSnapshot(int simTimeMs, std::vector< std::vector< float > > wts)
writes each snapshot to connect file
STDPType WithISTDPtype
published by GroupConfig
int getNumSynapticConnections(short int connectionId)
gets number of connections associated with a connection ID
#define MAX_TIME_SLICE
unsigned int spikeCountD2Sec
the total number of spikes with axonal delay > 1 in 1 second, used in CPU_MODE currently ...
SpikeIterator back()
pointer to the back of the spike buffer
float releaseACh
release per spike for Acetylcholine
STDPCurve WithISTDPcurve
the I-STDP curve
short int connect(int gIDpre, int gIDpost, const std::string &_type, float initWt, float maxWt, float prob, uint8_t minDelay, uint8_t maxDelay, RadiusRF radius, float mulSynFast, float mulSynSlow, bool synWtType)
creates a connection from each neuron in grpId1 to 'numPostSynapses' neurons in grpId2
Definition: snn_manager.cpp:97
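An illustrative call using this signature for 10% random connectivity with plastic synapses; the group IDs, weight range, delay range, and the use of RadiusRF(-1) to mean an unrestricted receptive field are assumptions made for the sketch:
  short int cId = snn.connect(gPre, gPost, "random",
                              0.05f, 0.25f, 0.10f,    // initWt, maxWt, prob
                              1, 20, RadiusRF(-1),    // 1..20 ms delays, no RF restriction (assumed)
                              1.0f, 1.0f, SYN_PLASTIC);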
unsigned short * Npre
stores the number of input connections to a neuron
integrationMethod_t
Integration methods.
Showtime mode, will only output warnings and errors.
void setNeuronParameters(int grpId, float izh_a, float izh_a_sd, float izh_b, float izh_b_sd, float izh_c, float izh_c_sd, float izh_d, float izh_d_sd)
Sets the Izhikevich parameters a, b, c, and d of a neuron group.
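A minimal sketch using the classic regular-spiking Izhikevich values with zero standard deviation; the group ID is hypothetical:
  // a = 0.02, b = 0.2, c = -65 mV, d = 8, all standard deviations 0
  snn.setNeuronParameters(gExc, 0.02f, 0.0f, 0.2f, 0.0f, -65.0f, 0.0f, 8.0f, 0.0f);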
float wtChangeDecay
the wtChange decay
a range struct for synaptic delays
float BETA_LTD
the amplitude of inhibitory LTD if the pulse I-STDP curve is used
void setCompartmentParameters(int grpId, float couplingUp, float couplingDown)
Coupling constants for the compartment are set using this method.
float dGABAa
multiplication factor for decay time of GABAa
void setHomeoBaseFiringRate(int groupId, float baseFiring, float baseFiringSD)
Sets homeostatic target firing rate (enforced through homeostatic synaptic scaling) ...
int Noffset
the offset of spike generator (poisson) neurons [0, numNPois), published by GroupConfigMD ...
float baseACh
baseline concentration of Acetylcholine
STDPType WithESTDPtype
the type of E-STDP (STANDARD or DA_MOD)
#define GET_CONN_NEURON_ID(val)
float TAU_MINUS_INV_INB
published by GroupConfig
unsigned int spikeCountExtRxD1
the number of external spikes with axonal delay == 1 in a simulation, used in CPU_MODE currently ...
std::vector< float > getConductanceGABAa(int grpId)
The configuration of a group.
int gsId
group id and synapse id
SNN(const std::string &name, SimMode preferredSimMode, LoggerMode loggerMode, int randSeed)
SNN Constructor.
Definition: snn_manager.cpp:78
bool sim_with_GABAb_rise
a flag to inform whether to compute GABAb rise time
int nId
neuron id
float releaseNE
release per spike for Noradrenaline
void setNeuronFileId(FILE *neuronFileId)
sets pointer to Neuron file
int runNetwork(int _nsec, int _nmsec, bool printRunSummary)
run the simulation for n sec
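A short sketch of the typical call order, assuming the network has already been configured on an snn object as in the other examples here:
  snn.setupNetwork();          // build the network (documented above)
  snn.runNetwork(1, 0, true);  // run for 1 s + 0 ms and print a run summary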
float ALPHA_PLUS_INB
the amplitude of alpha plus, if the exponential I-STDP curve is used
#define KERNEL_ERROR_NUMZ
Definition: error_code.h:155
unsigned int * timeTableD2
firing table, only used in CPU_MODE currently
A struct for retrieving neuromodulator information of a group.
int numNReg
number of regular (spiking) neurons
RangeWeight getWeightRange(short int connId)
returns RangeWeight struct of a connection
#define MAX_SIMULATION_TIME
#define GET_CONN_SYN_ID(val)
bool activeDP
flag for Dopamine
int createSpikeGeneratorGroup(const std::string &grpName, const Grid3D &grid, int neurType, int preferredPartition, ComputingBackend preferredBackend)
Creates a spike generator group (dummy-neurons, not Izhikevich spiking neurons)
Point3D getNeuronLocation3D(int neurId)
void setIntegrationMethod(integrationMethod_t method, int numStepsPerMs)
Sets the integration method and the number of integration steps per 1ms simulation time step...
#define KERNEL_ERROR_DEST_GRP_CONN
Definition: error_code.h:167
float avgTimeScale_decay
published by GroupConfig
#define KERNEL_ERROR_STDP_NO_IN_PLASTIC
Definition: error_code.h:236
int numNInhReg
number of regular inhibitory neurons in the global network
int numN
number of neurons in the global network
int lEndN
published by GroupConfigMD
float * gGABAb
conductance of gGABAb
std::vector< float > getConductanceAMPA(int grpId)
ConnSTDPInfo getConnSTDPInfo(short int connId)
#define POISSON_NEURON
#define KERNEL_DEBUG(formatc,...)
#define INT_MAX
DelayInfo * postDelayInfo
delay information
Class ConnectionMonitor.
float baseACh
baseline concentration of Acetylcholine, published by GroupConfig
int numNInhPois
number of inhibitory poisson neurons in the global network
void reset(int minDelay, int maxDelay)
Reset buffer data.
int getNeuronId(int gGrpId, Point3D location)
the inverse of getNeuronLocation3D
int ** extFiringTableD2
external firing table, only used on GPU
int numGroups
number of local groups in this local network
#define IS_INHIBITORY_TYPE(type)
integrationMethod_t simIntegrationMethod
integration method (forward-Euler or Fourth-order Runge-Kutta)
int LtoGOffset
published by GroupConfigMD
void setExternalCurrent(int grpId, const std::vector< float > &current)
injects current (mA) into the soma of every neuron in the group
#define MAX_SPIKE_MON_BUFFER_SIZE
a range struct for synaptic weight magnitudes
#define GET_CONN_GRP_ID(val)
bool WithISTDP
published by GroupConfig
float STP_A
published by GroupConfig
Contains all of CARLsim&#39;s core functionality.
Definition: snn.h:138
#define KERNEL_ERROR_COMP_CONNS_EXCEEDED
Definition: error_code.h:218
float STP_U
published by GroupConfig
ComputingBackend
computing backend
#define KERNEL_ERROR_IZH_PARAMS_NOT_SET
Definition: error_code.h:170
#define IS_EXCITATORY_TYPE(type)
int numNExcReg
number of regular excitatory neurons in the global network
float BETA_LTD
published by GroupConfig
float STP_tau_x_inv
published by GroupConfig
#define ID_OVERFLOW_ERROR
Definition: error_code.h:98
int getSimTimeMs()
Definition: snn.h:669
float LAMBDA
the range of inhibitory LTP if the pulse I-STDP curve is used
void setESTDP(int preGrpId, int postGrpId, bool isSet, STDPType type, STDPCurve curve, float alphaPlus, float tauPlus, float alphaMinus, float tauMinus, float gamma)
Set the spike-timing-dependent plasticity (STDP) for a neuron group.
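A hedged example of enabling exponential E-STDP between two hypothetical groups; the enumerator EXP_CURVE and all magnitudes are assumptions, and gamma is passed as 0 because it only applies to the timing-based curve:
  snn.setESTDP(gPre, gPost, true, STANDARD, EXP_CURVE,
               0.001f, 20.0f,    // alphaPlus, tauPlus (ms)
               -0.0012f, 20.0f,  // alphaMinus, tauMinus (ms)
               0.0f);            // gamma (unused for the exponential curve)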
float TAU_PLUS_INV_EXC
published by GroupConfig
#define KERNEL_ERROR_CONN_MISSING
Definition: error_code.h:197
SynInfo * postSynapticIds
10 bit syn id, 22 bit neuron id, ordered based on delay
int lStartN
published by GroupConfigMD
unsigned int * cumulativePost
int numConnections
number of local connections in this local network
void stopTesting()
exits a testing phase, making weight updates possible again
unsigned int maxSpikesD2
the estimated maximum number of spikes with delay >= 2 in a network
short int connectCompartments(int grpIdLower, int grpIdUpper)
Custom mode, the user can set the location of all the file pointers.
void setHomeostasis(int grpId, bool isSet, float homeoScale, float avgTimeScale)
Sets the homeostasis parameters. g is the grpID, enable=true(false) enables(disables) homeostasis...
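A sketch combining this call with setHomeoBaseFiringRate() (documented above) to enable homeostatic scaling on a hypothetical group; the scale factor, time scale, and target rate are illustrative:
  snn.setHomeostasis(gExc, true, 0.1f, 10.0f);    // homeoScale = 0.1, avgTimeScale = 10 s
  snn.setHomeoBaseFiringRate(gExc, 10.0f, 0.0f);  // target 10 Hz, zero standard deviation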
used to initialize by default constructor
#define MAX_SYN_DELAY
int numPostSynNet
the total number of post-connections in a network
#define LONG_SPIKE_MON_DURATION
runtime spike routing table entry
the update interval will be 10 ms, which is 100Hz update frequency
unsigned int type
float TAU_PLUS_INV_INB
the inverse of tau plus, if the exponential I-STDP curve is used
#define ANY
used for create* method to specify any GPU or a specific GPU
unsigned int * spikeGenBits
void pushAER(int time, int neurId)
inserts a (time, neurId) tuple into the 2D spike vector
CPU multithreading subroutine (that takes single argument) struct argument.
std::vector< std::vector< float > > getWeightMatrix2D(short int connId)
short int * grpIds
bool isRecording()
returns recording status
#define TARGET_NMDA
void setSTP(int grpId, bool isSet, float STP_U, float STP_tau_u, float STP_tau_x)
Sets STP parameters U, tau_u, and tau_x of a neuron group (pre-synaptically). CARLsim implements the short...
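As a usage sketch of the signature above, the group ID and the U, tau_u, and tau_x values below are illustrative assumptions:
  // U = 0.45, tau_u = 50 ms, tau_x = 750 ms on a hypothetical pre-synaptic group
  snn.setSTP(gExc, true, 0.45f, 50.0f, 750.0f);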
int neurId
corresponding global neuron Id
Definition: spike_buffer.h:92
int simNumStepsPerMs
number of steps per 1 millisecond
bool with_NMDA_rise
replaces sim_with_NMDA_rise
int numGroupsAssigned
number of groups assigned to this local network
int gStartN
published by GroupConfigMD
void setSpikeGenerator(int grpId, SpikeGeneratorCore *spikeGenFunc)
sets up a spike generator
bool isPoint3DinRF(const RadiusRF &radius, const Point3D &pre, const Point3D &post)
int numComp
number of compartmental neurons
standard exponential curve
int getGroupId(std::string grpName)
unsigned int icalcType
int getSimTime()
Definition: snn.h:667
GroupMonitor private core implementation.
float TAU_MINUS_INV_EXC
published by GroupConfig
static const char * loggerMode_string[]
#define KERNEL_ERROR_UNKNOWN_FILE_SIGNATURE
Definition: error_code.h:128
void setWeightAndWeightChangeUpdate(UpdateInterval wtANDwtChangeUpdateInterval, bool enableWtChangeDecay, float wtChangeDecay)
Sets the weight and weight change update parameters.
model is run on CPU Core(s), GPU card(s) or both
#define MAX_CONN_PER_SNN
bool WithSTDP
enable STDP flag
float baseDP
baseline concentration of Dopamine
runtime network configuration
int numNReg
number of regular (spiking) neurons in the global network
std::string grpName
#define SYNAPSE_ID_MASK
float avgTimeScale
homeostatic plasticity variables
float baseNE
baseline concentration of Noradrenaline
SpikeMonitor * setSpikeMonitor(int gid, FILE *fid)
sets up a spike monitor registered with a callback to process the spikes, there can only be one Spike...
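A minimal sketch of attaching a spike monitor to a hypothetical group and writing its spikes to a binary file; the file name is an assumption, and recording status can later be queried via the isRecording() accessor documented in this list:
  FILE* spkFid = fopen("spk_exc.dat", "wb");                 // binary spike file (assumed name)
  SpikeMonitor* spkMon = snn.setSpikeMonitor(gExc, spkFid);  // one monitor per group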
acetylcholine
bool WithSTP
published by GroupConfig
void setLastUpdated(long int lastUpdate)
sets timestamp of last NeuronMonitor update
#define KERNEL_ERROR_INVALID_END
Definition: error_code.h:146
bool activeNE
flag for Noradrenaline
FILE * getGroupFileId()
returns a pointer to the group data file
int getNumConnections()
Definition: snn.h:652
#define KERNEL_ERROR_STDP_SYN_PLASIC
Definition: error_code.h:110
unsigned int spikeCountSec
the total number of spikes in 1 second, used in CPU_MODE currently
the update interval will be 1000 ms, which is 1Hz update frequency
float releaseACh
release per spike for Acetylcholine
float TAU_PLUS_INV_EXC
the inverse of time constant plus, if the exponential or timing-based E-STDP curve is used ...
bool isConnectionPlastic(short int connId)
returns whether synapses in connection are fixed (false) or plastic (true)
int getGroupNumNeurons(int gGrpId)
Definition: snn.h:641
void setWeight(short int connId, int neurIdPre, int neurIdPost, float weight, bool updateWeightRange=false)
sets the weight value of a specific synapse
float * gNMDA
conductance of gNMDA
float sGABAb
scaling factor for GABAb amplitude
bool FixedInputWts
published by GroupConfigMD
Silent mode, no output is generated.
void setConductances(bool isSet, int tdAMPA, int trNMDA, int tdNMDA, int tdGABAa, int trGABAb, int tdGABAb)
Sets custom values for the conductance decay time constants, or disables conductances altogether. These will be applie...
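An illustrative COBA configuration matching the parameter order of the signature above; the decay constants are typical values chosen only for the sketch, and passing 0 for trNMDA and trGABAb is assumed to leave the rise times disabled:
  // tdAMPA = 5 ms, trNMDA = 0, tdNMDA = 150 ms, tdGABAa = 6 ms, trGABAb = 0, tdGABAb = 150 ms
  snn.setConductances(true, 5, 0, 150, 6, 0, 150);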
STDPCurve WithESTDPcurve
the E-STDP curve
#define MAX_NET_PER_SNN
ConnectionGeneratorCore * conn
float STP_tau_u_inv
published by GroupConfig
unsigned int nPoissonSpikes
the total number of spikes of poisson neurons, used in CPU_MODE currently
#define KERNEL_ERROR_CONN_MISSING2
Definition: error_code.h:200
#define STP_BUF_POS(nid, t, maxDelay)
#define KERNEL_ERROR_MAX_STP_DELAY
Definition: error_code.h:224
STDPCurve WithESTDPcurve
published by GroupConfig
#define SET_FIXED_PLASTIC(a)
bool WithESTDP
published by GroupConfig
void setConnectionModulation(int preGrpId, int postGrpId, IcalcType icalcType)
double y
bool activeACh
flag for Acetylcholine
#define TARGET_AMPA
#define KERNEL_ERROR_UNSUPPORTED_VERSION
Definition: error_code.h:131
void step()
advance to next time step
#define KERNEL_ERROR_GROUP_NAMES_MISMATCH
Definition: error_code.h:158
bool sim_with_NMDA_rise
a flag to inform whether to compute NMDA rise time
unsigned int maxSpikesD1
the estimated maximum number of spikes with delay == 1 in a network
float * wtChange
stores the weight change of a synaptic connection
static const unsigned int MINOR_VERSION
minor release version, as in CARLsim 2.X
Definition: snn.h:162
float releaseNE
release per spike for Noradrenaline
STDPType WithESTDPtype
published by GroupConfig
#define CHECK_CONNECTION_ID(n, total)
Used in the function getConnectionId
float release5HT
release per spike for Serotonin
void loadSimulation(FILE *fid)
float ALPHA_PLUS_INB
published by GroupConfig
SpikeIterator front(int stepOffset=0)
pointer to the front of the spike buffer
float ALPHA_MINUS_EXC
the amplitude of alpha minus, if the exponential or timing-based E-STDP curve is used ...
float * nextVoltage
membrane potential buffer (next/future time step) for each regular neuron
float wstpu[NM_NE+3]
Array size = last index + 1 + additional elements (norm + base).
ConnectConfig getConnectConfig(short int connectId)
required for homeostasis
model is run on CPU core(s)
float W_PKA
published by GroupConfig
float TAU_MINUS_INV_INB
the inverse of tau minus, if the exponential I-STDP curve is used
the update interval will be 100 ms, which is 10Hz update frequency
#define KERNEL_ERROR_NUMY
Definition: error_code.h:152
A struct to specify the receptive field (RF) radius in 3 dimensions.
~SNN()
SNN Destructor.
Definition: snn_manager.cpp:87
float ALPHA_PLUS_EXC
the amplitude of alpha plus, if the exponential or timing-based E-STDP curve is used ...
float timeStep
inverse of simNumStepsPerMs