CARLsim  5.0.0
CARLsim: a GPU-accelerated SNN simulator
snn_manager.cpp
Go to the documentation of this file.
1 /* * Copyright (c) 2016 Regents of the University of California. All rights reserved.
2 *
3 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions
5 * are met:
6 *
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 *
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * 3. The names of its contributors may not be used to endorse or promote
15 * products derived from this software without specific prior written
16 * permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
22 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
23 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
24 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
25 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
26 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
27 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 *
30 * *********************************************************************************************** *
31 * CARLsim
32 * created by: (MDR) Micah Richert, (JN) Jayram M. Nageswaran
33 * maintained by:
34 * (MA) Mike Avery <averym@uci.edu>
35 * (MB) Michael Beyeler <mbeyeler@uci.edu>,
36 * (KDC) Kristofor Carlson <kdcarlso@uci.edu>
37 * (TSC) Ting-Shuo Chou <tingshuc@uci.edu>
38 * (HK) Hirak J Kashyap <kashyaph@uci.edu>
39 *
40 * CARLsim v1.0: JM, MDR
41 * CARLsim v2.0/v2.1/v2.2: JM, MDR, MA, MB, KDC
42 * CARLsim3: MB, KDC, TSC
43 * CARLsim4: TSC, HK
44 * CARLsim5: HK, JX, KC
45 *
46 * CARLsim available from http://socsci.uci.edu/~jkrichma/CARLsim/
47 * Ver 12/31/2016
48 */
49 
50 #include <snn.h>
51 #include <sstream>
52 #include <algorithm>
53 
54 #include <connection_monitor.h>
56 #include <spike_monitor.h>
57 #include <spike_monitor_core.h>
58 #include <group_monitor.h>
59 #include <group_monitor_core.h>
60 #include <neuron_monitor.h>
61 #include <neuron_monitor_core.h>
62 
63 #include <spike_buffer.h>
64 #include <error_code.h>
65 
66 // \FIXME what are the following for? why were they all the way at the bottom of this file?
67 
68 #define COMPACTION_ALIGNMENT_PRE 16
69 #define COMPACTION_ALIGNMENT_POST 0
70 
74 
75 
76 // TODO: consider moving unsafe computations out of constructor
/*!
 * \brief Constructor: records the (const) network name, preferred simulation
 * mode, logger mode, and RNG seed, then delegates all remaining, potentially
 * fallible setup to SNNinit().
 *
 * The random seed is normalized through SNN::setRandSeed() before being
 * stored, so randSeed_ always holds the seed that is actually in effect.
 */
SNN::SNN(const std::string& name, SimMode preferredSimMode, LoggerMode loggerMode, int randSeed)
	: networkName_(name), preferredSimMode_(preferredSimMode), loggerMode_(loggerMode),
	randSeed_(SNN::setRandSeed(randSeed)) // all of these are const
{
	// move all unsafe operations out of constructor
	SNNinit();
}
84 
85 // destructor
87  if (!simulatorDeleted)
88  deleteObjects();
89 }
90 
94 
95 // make from each neuron in grpId1 to 'numPostSynapses' neurons in grpId2
96 short int SNN::connect(int grpId1, int grpId2, const std::string& _type, float initWt, float maxWt, float prob,
97  uint8_t minDelay, uint8_t maxDelay, RadiusRF radius,
98  float _mulSynFast, float _mulSynSlow, bool synWtType) {
99  //const std::string& wtType
100  int retId=-1;
101  assert(grpId1 < numGroups);
102  assert(grpId2 < numGroups);
103  assert(minDelay <= maxDelay);
104  assert(!isPoissonGroup(grpId2));
105 
106  //* \deprecated Do these ramp thingies still work?
107 // bool useRandWts = (wtType.find("random") != std::string::npos);
108 // bool useRampDownWts = (wtType.find("ramp-down") != std::string::npos);
109 // bool useRampUpWts = (wtType.find("ramp-up") != std::string::npos);
110 // uint32_t connProp = SET_INITWTS_RANDOM(useRandWts)
111 // | SET_CONN_PRESENT(1)
112 // | SET_FIXED_PLASTIC(synWtType)
113 // | SET_INITWTS_RAMPUP(useRampUpWts)
114 // | SET_INITWTS_RAMPDOWN(useRampDownWts);
115  uint32_t connProp = SET_CONN_PRESENT(1) | SET_FIXED_PLASTIC(synWtType);
116 
117  Grid3D szPre = getGroupGrid3D(grpId1);
118  Grid3D szPost = getGroupGrid3D(grpId2);
119 
120  // initialize configuration of a connection
121  ConnectConfig connConfig;
122 
123  connConfig.grpSrc = grpId1;
124  connConfig.grpDest = grpId2;
125  connConfig.initWt = initWt;
126  connConfig.maxWt = maxWt;
127  connConfig.maxDelay = maxDelay;
128  connConfig.minDelay = minDelay;
129 // newInfo->radX = (radX<0) ? MAX(szPre.x,szPost.x) : radX; // <0 means full connectivity, so the
130 // newInfo->radY = (radY<0) ? MAX(szPre.y,szPost.y) : radY; // effective group size is Grid3D.x. Grab
131 // newInfo->radZ = (radZ<0) ? MAX(szPre.z,szPost.z) : radZ; // the larger of pre / post to connect all
132  connConfig.connRadius = radius;
133  connConfig.mulSynFast = _mulSynFast;
134  connConfig.mulSynSlow = _mulSynSlow;
135  connConfig.connProp = connProp;
136  connConfig.connProbability = prob;
137  connConfig.type = CONN_UNKNOWN;
138  connConfig.connectionMonitorId = -1;
139  connConfig.connId = -1;
140  connConfig.conn = NULL;
141  connConfig.numberOfConnections = 0;
142 
143  if ( _type.find("random") != std::string::npos) {
144  connConfig.type = CONN_RANDOM;
145  }
146  //so you're setting the size to be prob*Number of synapses in group info + some standard deviation ...
147  else if ( _type.find("full-no-direct") != std::string::npos) {
148  connConfig.type = CONN_FULL_NO_DIRECT;
149  }
150  else if ( _type.find("full") != std::string::npos) {
151  connConfig.type = CONN_FULL;
152  }
153  else if ( _type.find("one-to-one") != std::string::npos) {
154  connConfig.type = CONN_ONE_TO_ONE;
155  } else if ( _type.find("gaussian") != std::string::npos) {
156  connConfig.type = CONN_GAUSSIAN;
157  } else {
158  KERNEL_ERROR("Invalid connection type (should be 'random', 'full', 'one-to-one', 'full-no-direct', or 'gaussian')");
159  exitSimulation(-1);
160  }
161 
162  // assign connection id
163  assert(connConfig.connId == -1);
164  connConfig.connId = numConnections;
165 
166  KERNEL_DEBUG("CONNECT SETUP: connId=%d, mulFast=%f, mulSlow=%f", connConfig.connId, connConfig.mulSynFast, connConfig.mulSynSlow);
167 
168  // store the configuration of a connection
169  connectConfigMap[numConnections] = connConfig; // connConfig.connId == numConnections
170 
171  assert(numConnections < MAX_CONN_PER_SNN); // make sure we don't overflow connId
172  numConnections++;
173 
174  return (numConnections - 1);
175 }
176 
177 // make custom connections from grpId1 to grpId2
178 short int SNN::connect(int grpId1, int grpId2, ConnectionGeneratorCore* conn, float _mulSynFast, float _mulSynSlow,
179  bool synWtType) {
180  int retId=-1;
181 
182  assert(grpId1 < numGroups);
183  assert(grpId2 < numGroups);
184 
185  // initialize the configuration of a connection
186  ConnectConfig connConfig;
187 
188  connConfig.grpSrc = grpId1;
189  connConfig.grpDest = grpId2;
190  connConfig.initWt = 0.0f;
191  connConfig.maxWt = 0.0f;
192  connConfig.maxDelay = MAX_SYN_DELAY;
193  connConfig.minDelay = 1;
194  connConfig.mulSynFast = _mulSynFast;
195  connConfig.mulSynSlow = _mulSynSlow;
196  connConfig.connProp = SET_CONN_PRESENT(1) | SET_FIXED_PLASTIC(synWtType);
197  connConfig.type = CONN_USER_DEFINED;
198  connConfig.conn = conn;
199  connConfig.connectionMonitorId = -1;
200  connConfig.connId = -1;
201  connConfig.numberOfConnections = 0;
202 
203  // assign a connection id
204  assert(connConfig.connId == -1);
205  connConfig.connId = numConnections;
206 
207  // store the configuration of a connection
208  connectConfigMap[numConnections] = connConfig; // connConfig.connId == numConnections
209 
210  assert(numConnections < MAX_CONN_PER_SNN); // make sure we don't overflow connId
211  numConnections++;
212 
213  return (numConnections - 1);
214 }
215 
216 // make a compartmental connection between two groups
217 short int SNN::connectCompartments(int grpIdLower, int grpIdUpper) {
218  assert(grpIdLower >= 0 && grpIdLower < numGroups);
219  assert(grpIdUpper >= 0 && grpIdUpper < numGroups);
220  assert(grpIdLower != grpIdUpper);
221  assert(!isPoissonGroup(grpIdLower));
222  assert(!isPoissonGroup(grpIdUpper));
223 
224  // the two groups must be located on the same partition
225  assert(groupConfigMap[grpIdLower].preferredNetId == groupConfigMap[grpIdUpper].preferredNetId);
226 
227  // this flag must be set if any compartmental connections exist
228  // note that grpId.withCompartments is not necessarily set just yet, this will be done in
229  // CpuSNN::setCompartmentParameters
230  sim_with_compartments = true;
231 
232  compConnectConfig compConnConfig;
233 
234  compConnConfig.grpSrc = grpIdLower;
235  compConnConfig.grpDest = grpIdUpper;
236  compConnConfig.connId = -1;
237 
238  // assign a connection id
239  assert(compConnConfig.connId == -1);
240  compConnConfig.connId = numCompartmentConnections;
241 
242  // store the configuration of a connection
243  compConnectConfigMap[numCompartmentConnections] = compConnConfig;
244 
245  numCompartmentConnections++;
246 
247  return (numCompartmentConnections - 1);
248 }
249 
250 // create group of Izhikevich neurons
251 // use int for nNeur to avoid arithmetic underflow
252 int SNN::createGroup(const std::string& grpName, const Grid3D& grid, int neurType, int preferredPartition, ComputingBackend preferredBackend) {
253  assert(grid.numX * grid.numY * grid.numZ > 0);
254  assert(neurType >= 0);
255  assert(numGroups < MAX_GRP_PER_SNN);
256 
257  if ( (!(neurType & TARGET_AMPA) && !(neurType & TARGET_NMDA) &&
258  !(neurType & TARGET_GABAa) && !(neurType & TARGET_GABAb)) || (neurType & POISSON_NEURON)) {
259  KERNEL_ERROR("Invalid type using createGroup... Cannot create poisson generators here.");
260  exitSimulation(1);
261  }
262 
263  // initialize group configuration
264  GroupConfig grpConfig;
265  GroupConfigMD grpConfigMD;
266 
267  //All groups are non-compartmental by default
268  grpConfig.withCompartments = false;
269 
270  // init parameters of neural group size and location
271  grpConfig.grpName = grpName;
272  grpConfig.type = neurType;
273  grpConfig.numN = grid.N;
274 
275  grpConfig.isSpikeGenerator = false;
276  grpConfig.grid = grid;
277  grpConfig.isLIF = false;
278 
279  if (preferredPartition == ANY) {
280  grpConfig.preferredNetId = ANY;
281  } else if (preferredBackend == CPU_CORES) {
282  grpConfig.preferredNetId = preferredPartition + CPU_RUNTIME_BASE;
283  } else {
284  grpConfig.preferredNetId = preferredPartition + GPU_RUNTIME_BASE;
285  }
286 
287  // assign a global group id
288  grpConfigMD.gGrpId = numGroups;
289 
290  // store the configuration of a group
291  groupConfigMap[numGroups] = grpConfig; // numGroups == grpId
292  groupConfigMDMap[numGroups] = grpConfigMD;
293 
294  assert(numGroups < MAX_GRP_PER_SNN); // make sure we don't overflow connId
295  numGroups++;
296 
297  return grpConfigMD.gGrpId;
298 }
299 
300 // create group of LIF neurons
301 // use int for nNeur to avoid arithmetic underflow
302 int SNN::createGroupLIF(const std::string& grpName, const Grid3D& grid, int neurType, int preferredPartition, ComputingBackend preferredBackend) {
303  assert(grid.numX * grid.numY * grid.numZ > 0);
304  assert(neurType >= 0);
305  assert(numGroups < MAX_GRP_PER_SNN);
306 
307  if ( (!(neurType & TARGET_AMPA) && !(neurType & TARGET_NMDA) &&
308  !(neurType & TARGET_GABAa) && !(neurType & TARGET_GABAb)) || (neurType & POISSON_NEURON)) {
309  KERNEL_ERROR("Invalid type using createGroup... Cannot create poisson generators here.");
310  exitSimulation(1);
311  }
312 
313  // initialize group configuration
314  GroupConfig grpConfig;
315  GroupConfigMD grpConfigMD;
316 
317  // init parameters of neural group size and location
318  grpConfig.grpName = grpName;
319  grpConfig.type = neurType;
320  grpConfig.numN = grid.N;
321 
322  grpConfig.isLIF = true;
323  grpConfig.isSpikeGenerator = false;
324  grpConfig.grid = grid;
325 
326  if (preferredPartition == ANY) {
327  grpConfig.preferredNetId = ANY;
328  } else if (preferredBackend == CPU_CORES) {
329  grpConfig.preferredNetId = preferredPartition + CPU_RUNTIME_BASE;
330  } else {
331  grpConfig.preferredNetId = preferredPartition + GPU_RUNTIME_BASE;
332  }
333 
334  // assign a global group id
335  grpConfigMD.gGrpId = numGroups;
336 
337  // store the configuration of a group
338  groupConfigMap[numGroups] = grpConfig; // numGroups == grpId
339  groupConfigMDMap[numGroups] = grpConfigMD;
340 
341  assert(numGroups < MAX_GRP_PER_SNN); // make sure we don't overflow connId
342  numGroups++;
343 
344  return grpConfigMD.gGrpId;
345 }
346 
347 // create spike generator group
348 // use int for nNeur to avoid arithmetic underflow
349 int SNN::createSpikeGeneratorGroup(const std::string& grpName, const Grid3D& grid, int neurType, int preferredPartition, ComputingBackend preferredBackend) {
350  assert(grid.numX * grid.numY * grid.numZ > 0);
351  assert(neurType >= 0);
352  assert(numGroups < MAX_GRP_PER_SNN);
353 
354  // initialize group configuration
355  GroupConfig grpConfig;
356  GroupConfigMD grpConfigMD;
357 
358  //All groups are non-compartmental by default FIXME:IS THIS NECESSARY?
359  grpConfig.withCompartments = false;
360 
361  // init parameters of neural group size and location
362  grpConfig.grpName = grpName;
363  grpConfig.type = neurType | POISSON_NEURON;
364  grpConfig.numN = grid.N;
365  grpConfig.isSpikeGenerator = true;
366  grpConfig.grid = grid;
367  grpConfig.isLIF = false;
368 
369  if (preferredPartition == ANY) {
370  grpConfig.preferredNetId = ANY;
371  }
372  else if (preferredBackend == CPU_CORES) {
373  grpConfig.preferredNetId = preferredPartition + CPU_RUNTIME_BASE;
374  }
375  else {
376  grpConfig.preferredNetId = preferredPartition + GPU_RUNTIME_BASE;
377  }
378 
379  // assign a global group id
380  grpConfigMD.gGrpId = numGroups;
381 
382  // store the configuration of a group
383  groupConfigMap[numGroups] = grpConfig;
384  groupConfigMDMap[numGroups] = grpConfigMD;
385 
386  assert(numGroups < MAX_GRP_PER_SNN); // make sure we don't overflow connId
387  numGroups++;
388  numSpikeGenGrps++;
389 
390  return grpConfigMD.gGrpId;
391 }
392 
393 void SNN::setCompartmentParameters(int gGrpId, float couplingUp, float couplingDown) {
394  if (gGrpId == ALL) {
395  for (int grpId = 0; grpId<numGroups; grpId++) {
396  setCompartmentParameters(grpId, couplingUp, couplingDown);
397  }
398  }
399  else {
400  groupConfigMap[gGrpId].withCompartments = true;
401  groupConfigMap[gGrpId].compCouplingUp = couplingUp;
402  groupConfigMap[gGrpId].compCouplingDown = couplingDown;
403  glbNetworkConfig.numComp += groupConfigMap[gGrpId].numN;
404  }
405 }
406 
407 
408 // set conductance values for a simulation (custom values or disable conductances alltogether)
void SNN::setConductances(bool isSet, int tdAMPA, int trNMDA, int tdNMDA, int tdGABAa, int trGABAb, int tdGABAb) {
	if (isSet) {
		assert(tdAMPA>0); assert(tdNMDA>0); assert(tdGABAa>0); assert(tdGABAb>0);
		assert(trNMDA>=0); assert(trGABAb>=0); // 0 to disable rise times
		assert(trNMDA!=tdNMDA); assert(trGABAb!=tdGABAb); // singularity
	}

	// set conductances globally for all connections
	// NOTE(review): the decay factors below are computed even when
	// isSet==false, so the time constants must be nonzero in that case too
	// (otherwise this divides by zero) -- confirm callers pass valid defaults
	sim_with_conductances |= isSet;
	dAMPA = 1.0-1.0/tdAMPA;   // per-ms decay factor for AMPA conductance
	dNMDA = 1.0-1.0/tdNMDA;   // per-ms decay factor for NMDA conductance
	dGABAa = 1.0-1.0/tdGABAa; // per-ms decay factor for GABAa conductance
	dGABAb = 1.0-1.0/tdGABAb; // per-ms decay factor for GABAb conductance

	if (trNMDA>0) {
		// use rise time for NMDA
		sim_with_NMDA_rise = true;
		rNMDA = 1.0-1.0/trNMDA; // per-ms rise factor

		// compute max conductance under this model to scale it back to 1
		// otherwise the peak conductance will not be equal to the weight
		double tmax = (-tdNMDA*trNMDA*log(1.0*trNMDA/tdNMDA))/(tdNMDA-trNMDA); // t at which cond will be max
		sNMDA = 1.0/(exp(-tmax/tdNMDA)-exp(-tmax/trNMDA)); // scaling factor, 1 over max amplitude
		// guard against degenerate parameter combinations (tr==td is already
		// excluded by the asserts above when isSet)
		assert(!isinf(tmax) && !isnan(tmax) && tmax>=0);
		assert(!isinf(sNMDA) && !isnan(sNMDA) && sNMDA>0);
	}

	if (trGABAb>0) {
		// use rise time for GABAb
		sim_with_GABAb_rise = true;
		rGABAb = 1.0-1.0/trGABAb; // per-ms rise factor

		// compute max conductance under this model to scale it back to 1
		// otherwise the peak conductance will not be equal to the weight
		double tmax = (-tdGABAb*trGABAb*log(1.0*trGABAb/tdGABAb))/(tdGABAb-trGABAb); // t at which cond will be max
		sGABAb = 1.0/(exp(-tmax/tdGABAb)-exp(-tmax/trGABAb)); // scaling factor, 1 over max amplitude
		assert(!isinf(tmax) && !isnan(tmax)); assert(!isinf(sGABAb) && !isnan(sGABAb) && sGABAb>0);
	}

	// report the effective synapse model; COBA stays on once any call set it
	if (sim_with_conductances) {
		KERNEL_INFO("Running COBA mode:");
		KERNEL_INFO(" - AMPA decay time = %5d ms", tdAMPA);
		KERNEL_INFO(" - NMDA rise time %s = %5d ms", sim_with_NMDA_rise?" ":"(disabled)", trNMDA);
		KERNEL_INFO(" - GABAa decay time = %5d ms", tdGABAa);
		KERNEL_INFO(" - GABAb rise time %s = %5d ms", sim_with_GABAb_rise?" ":"(disabled)",trGABAb);
		KERNEL_INFO(" - GABAb decay time = %5d ms", tdGABAb);
	} else {
		KERNEL_INFO("Running CUBA mode (all synaptic conductances disabled)");
	}
}
459 
460 // set homeostasis for group
461 void SNN::setHomeostasis(int gGrpId, bool isSet, float homeoScale, float avgTimeScale) {
462  if (gGrpId == ALL) { // shortcut for all groups
463  for(int grpId = 0; grpId < numGroups; grpId++) {
464  setHomeostasis(grpId, isSet, homeoScale, avgTimeScale);
465  }
466  } else {
467  // set conductances for a given group
468  sim_with_homeostasis |= isSet;
469  groupConfigMap[gGrpId].homeoConfig.WithHomeostasis = isSet;
470  groupConfigMap[gGrpId].homeoConfig.homeostasisScale = homeoScale;
471  groupConfigMap[gGrpId].homeoConfig.avgTimeScale = avgTimeScale;
472  groupConfigMap[gGrpId].homeoConfig.avgTimeScaleInv = 1.0f / avgTimeScale;
473  groupConfigMap[gGrpId].homeoConfig.avgTimeScaleDecay = (avgTimeScale * 1000.0f - 1.0f) / (avgTimeScale * 1000.0f);
474 
475  KERNEL_INFO("Homeostasis parameters %s for %d (%s):\thomeoScale: %f, avgTimeScale: %f",
476  isSet?"enabled":"disabled", gGrpId, groupConfigMap[gGrpId].grpName.c_str(), homeoScale, avgTimeScale);
477  }
478 }
479 
480 // set a homeostatic target firing rate (enforced through homeostatic synaptic scaling)
481 void SNN::setHomeoBaseFiringRate(int gGrpId, float baseFiring, float baseFiringSD) {
482  if (gGrpId == ALL) { // shortcut for all groups
483  for(int grpId = 0; grpId < numGroups; grpId++) {
484  setHomeoBaseFiringRate(grpId, baseFiring, baseFiringSD);
485  }
486  } else {
487  // set homeostatsis for a given group
488  groupConfigMap[gGrpId].homeoConfig.baseFiring = baseFiring;
489  groupConfigMap[gGrpId].homeoConfig.baseFiringSD = baseFiringSD;
490 
491  KERNEL_INFO("Homeostatic base firing rate set for %d (%s):\tbaseFiring: %3.3f, baseFiringStd: %3.3f",
492  gGrpId, groupConfigMap[gGrpId].grpName.c_str(), baseFiring, baseFiringSD);
493  }
494 }
495 
496 
497 void SNN::setIntegrationMethod(integrationMethod_t method, int numStepsPerMs) {
498  assert(numStepsPerMs >= 1 && numStepsPerMs <= 100);
499  glbNetworkConfig.simIntegrationMethod = method;
500  glbNetworkConfig.simNumStepsPerMs = numStepsPerMs;
501  glbNetworkConfig.timeStep = 1.0f / numStepsPerMs;
502 }
503 
504 // set Izhikevich parameters for group
505 void SNN::setNeuronParameters(int gGrpId, float izh_a, float izh_a_sd, float izh_b, float izh_b_sd,
506  float izh_c, float izh_c_sd, float izh_d, float izh_d_sd)
507 {
508  assert(gGrpId >= -1);
509  assert(izh_a_sd >= 0); assert(izh_b_sd >= 0); assert(izh_c_sd >= 0); assert(izh_d_sd >= 0);
510 
511  if (gGrpId == ALL) { // shortcut for all groups
512  for(int grpId = 0; grpId < numGroups; grpId++) {
513  setNeuronParameters(grpId, izh_a, izh_a_sd, izh_b, izh_b_sd, izh_c, izh_c_sd, izh_d, izh_d_sd);
514  }
515  } else {
516  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a = izh_a;
517  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a_sd = izh_a_sd;
518  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b = izh_b;
519  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b_sd = izh_b_sd;
520  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c = izh_c;
521  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c_sd = izh_c_sd;
522  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d = izh_d;
523  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d_sd = izh_d_sd;
524  groupConfigMap[gGrpId].withParamModel_9 = 0;
525  groupConfigMap[gGrpId].isLIF = 0;
526  }
527 }
528 
529 // set (9) Izhikevich parameters for group
530 void SNN::setNeuronParameters(int gGrpId, float izh_C, float izh_C_sd, float izh_k, float izh_k_sd,
531  float izh_vr, float izh_vr_sd, float izh_vt, float izh_vt_sd,
532  float izh_a, float izh_a_sd, float izh_b, float izh_b_sd,
533  float izh_vpeak, float izh_vpeak_sd, float izh_c, float izh_c_sd,
534  float izh_d, float izh_d_sd)
535 {
536  assert(gGrpId >= -1);
537  assert(izh_C_sd >= 0); assert(izh_k_sd >= 0); assert(izh_vr_sd >= 0);
538  assert(izh_vt_sd >= 0); assert(izh_a_sd >= 0); assert(izh_b_sd >= 0); assert(izh_vpeak_sd >= 0);
539  assert(izh_c_sd >= 0); assert(izh_d_sd >= 0);
540 
541  if (gGrpId == ALL) { // shortcut for all groups
542  for (int grpId = 0; grpId<numGroups; grpId++) {
543  setNeuronParameters(grpId, izh_C, izh_C_sd, izh_k, izh_k_sd, izh_vr, izh_vr_sd, izh_vt, izh_vt_sd,
544  izh_a, izh_a_sd, izh_b, izh_b_sd, izh_vpeak, izh_vpeak_sd, izh_c, izh_c_sd,
545  izh_d, izh_d_sd);
546  }
547  }
548  else {
549  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a = izh_a;
550  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a_sd = izh_a_sd;
551  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b = izh_b;
552  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b_sd = izh_b_sd;
553  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c = izh_c;
554  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c_sd = izh_c_sd;
555  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d = izh_d;
556  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d_sd = izh_d_sd;
557  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_C = izh_C;
558  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_C_sd = izh_C_sd;
559  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_k = izh_k;
560  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_k_sd = izh_k_sd;
561  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vr = izh_vr;
562  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vr_sd = izh_vr_sd;
563  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vt = izh_vt;
564  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vt_sd = izh_vt_sd;
565  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vpeak = izh_vpeak;
566  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vpeak_sd = izh_vpeak_sd;
567  groupConfigMap[gGrpId].withParamModel_9 = 1;
568  groupConfigMap[gGrpId].isLIF = 0;
569  KERNEL_INFO("Set a nine parameter group!");
570  }
571 }
572 
573 
574 // set LIF parameters for the group
575 void SNN::setNeuronParametersLIF(int gGrpId, int tau_m, int tau_ref, float vTh, float vReset, double minRmem, double maxRmem)
576 {
577  assert(gGrpId >= -1);
578  assert(tau_m >= 0); assert(tau_ref >= 0); assert(vReset < vTh);
579  assert(minRmem >= 0.0f); assert(minRmem <= maxRmem);
580 
581  if (gGrpId == ALL) { // shortcut for all groups
582  for(int grpId = 0; grpId < numGroups; grpId++) {
583  setNeuronParametersLIF(grpId, tau_m, tau_ref, vTh, vReset, minRmem, maxRmem);
584  }
585  } else {
586  groupConfigMap[gGrpId].neuralDynamicsConfig.lif_tau_m = tau_m;
587  groupConfigMap[gGrpId].neuralDynamicsConfig.lif_tau_ref = tau_ref;
588  groupConfigMap[gGrpId].neuralDynamicsConfig.lif_vTh = vTh;
589  groupConfigMap[gGrpId].neuralDynamicsConfig.lif_vReset = vReset;
590  groupConfigMap[gGrpId].neuralDynamicsConfig.lif_minRmem = minRmem;
591  groupConfigMap[gGrpId].neuralDynamicsConfig.lif_maxRmem = maxRmem;
592  groupConfigMap[gGrpId].withParamModel_9 = 0;
593  groupConfigMap[gGrpId].isLIF = 1;
594  }
595 }
596 
597 void SNN::setNeuromodulator(int gGrpId, float baseDP, float tauDP, float base5HT, float tau5HT, float baseACh,
598  float tauACh, float baseNE, float tauNE) {
599 
600  assert(gGrpId >= -1);
601  assert(baseDP > 0.0f); assert(base5HT > 0.0f); assert(baseACh > 0.0f); assert(baseNE > 0.0f);
602  assert(tauDP > 0); assert(tau5HT > 0); assert(tauACh > 0); assert(tauNE > 0);
603 
604  if (gGrpId == ALL) { // shortcut for all groups
605  for (int grpId = 0; grpId < numGroups; grpId++) {
606  setNeuromodulator(grpId, baseDP, tauDP, base5HT, tau5HT, baseACh, tauACh, baseNE, tauNE);
607  }
608  } else {
609  groupConfigMap[gGrpId].neuromodulatorConfig.baseDP = baseDP;
610  groupConfigMap[gGrpId].neuromodulatorConfig.decayDP = 1.0f - (1.0f / tauDP);
611  groupConfigMap[gGrpId].neuromodulatorConfig.base5HT = base5HT;
612  groupConfigMap[gGrpId].neuromodulatorConfig.decay5HT = 1.0f - (1.0f / tau5HT);
613  groupConfigMap[gGrpId].neuromodulatorConfig.baseACh = baseACh;
614  groupConfigMap[gGrpId].neuromodulatorConfig.decayACh = 1.0f - (1.0f / tauACh);
615  groupConfigMap[gGrpId].neuromodulatorConfig.baseNE = baseNE;
616  groupConfigMap[gGrpId].neuromodulatorConfig.decayNE = 1.0f - (1.0f / tauNE);
617  }
618 }
619 
620 // set ESTDP params
621 void SNN::setESTDP(int gGrpId, bool isSet, STDPType type, STDPCurve curve, float alphaPlus, float tauPlus, float alphaMinus, float tauMinus, float gamma) {
622  assert(gGrpId >= -1);
623  if (isSet) {
624  assert(type!=UNKNOWN_STDP);
625  assert(tauPlus > 0.0f); assert(tauMinus > 0.0f); assert(gamma >= 0.0f);
626  }
627 
628  if (gGrpId == ALL) { // shortcut for all groups
629  for(int grpId = 0; grpId < numGroups; grpId++) {
630  setESTDP(grpId, isSet, type, curve, alphaPlus, tauPlus, alphaMinus, tauMinus, gamma);
631  }
632  } else {
633  // set STDP for a given group
634  // set params for STDP curve
635  groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_EXC = alphaPlus;
636  groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_EXC = alphaMinus;
637  groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_EXC = 1.0f / tauPlus;
638  groupConfigMap[gGrpId].stdpConfig.TAU_MINUS_INV_EXC = 1.0f / tauMinus;
639  groupConfigMap[gGrpId].stdpConfig.GAMMA = gamma;
640  groupConfigMap[gGrpId].stdpConfig.KAPPA = (1 + exp(-gamma / tauPlus)) / (1 - exp(-gamma / tauPlus));
641  groupConfigMap[gGrpId].stdpConfig.OMEGA = alphaPlus * (1 - groupConfigMap[gGrpId].stdpConfig.KAPPA);
642  // set flags for STDP function
643  groupConfigMap[gGrpId].stdpConfig.WithESTDPtype = type;
644  groupConfigMap[gGrpId].stdpConfig.WithESTDPcurve = curve;
645  groupConfigMap[gGrpId].stdpConfig.WithESTDP = isSet;
646  groupConfigMap[gGrpId].stdpConfig.WithSTDP |= groupConfigMap[gGrpId].stdpConfig.WithESTDP;
647  sim_with_stdp |= groupConfigMap[gGrpId].stdpConfig.WithSTDP;
648 
649  KERNEL_INFO("E-STDP %s for %s(%d)", isSet?"enabled":"disabled", groupConfigMap[gGrpId].grpName.c_str(), gGrpId);
650  }
651 }
652 
653 // set ISTDP params
654 void SNN::setISTDP(int gGrpId, bool isSet, STDPType type, STDPCurve curve, float ab1, float ab2, float tau1, float tau2) {
655  assert(gGrpId >= -1);
656  if (isSet) {
657  assert(type != UNKNOWN_STDP);
658  assert(tau1 > 0); assert(tau2 > 0);
659  }
660 
661  if (gGrpId==ALL) { // shortcut for all groups
662  for(int grpId = 0; grpId < numGroups; grpId++) {
663  setISTDP(grpId, isSet, type, curve, ab1, ab2, tau1, tau2);
664  }
665  } else {
666  // set STDP for a given group
667  // set params for STDP curve
668  if (curve == EXP_CURVE) {
669  groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_INB = ab1;
670  groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_INB = ab2;
671  groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_INB = 1.0f / tau1;
672  groupConfigMap[gGrpId].stdpConfig.TAU_MINUS_INV_INB = 1.0f / tau2;
673  groupConfigMap[gGrpId].stdpConfig.BETA_LTP = 0.0f;
674  groupConfigMap[gGrpId].stdpConfig.BETA_LTD = 0.0f;
675  groupConfigMap[gGrpId].stdpConfig.LAMBDA = 1.0f;
676  groupConfigMap[gGrpId].stdpConfig.DELTA = 1.0f;
677  } else {
678  groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_INB = 0.0f;
679  groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_INB = 0.0f;
680  groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_INB = 1.0f;
681  groupConfigMap[gGrpId].stdpConfig.TAU_MINUS_INV_INB = 1.0f;
682  groupConfigMap[gGrpId].stdpConfig.BETA_LTP = ab1;
683  groupConfigMap[gGrpId].stdpConfig.BETA_LTD = ab2;
684  groupConfigMap[gGrpId].stdpConfig.LAMBDA = tau1;
685  groupConfigMap[gGrpId].stdpConfig.DELTA = tau2;
686  }
687  // set flags for STDP function
688  //FIXME: separate STDPType to ESTDPType and ISTDPType
689  groupConfigMap[gGrpId].stdpConfig.WithISTDPtype = type;
690  groupConfigMap[gGrpId].stdpConfig.WithISTDPcurve = curve;
691  groupConfigMap[gGrpId].stdpConfig.WithISTDP = isSet;
692  groupConfigMap[gGrpId].stdpConfig.WithSTDP |= groupConfigMap[gGrpId].stdpConfig.WithISTDP;
693  sim_with_stdp |= groupConfigMap[gGrpId].stdpConfig.WithSTDP;
694 
695  KERNEL_INFO("I-STDP %s for %s(%d)", isSet?"enabled":"disabled", groupConfigMap[gGrpId].grpName.c_str(), gGrpId);
696  }
697 }
698 
699 // set STP params
700 void SNN::setSTP(int gGrpId, bool isSet, float STP_U, float STP_tau_u, float STP_tau_x) {
701  assert(gGrpId >= -1);
702  if (isSet) {
703  assert(STP_U > 0 && STP_U <= 1); assert(STP_tau_u > 0); assert(STP_tau_x > 0);
704  }
705 
706  if (gGrpId == ALL) { // shortcut for all groups
707  for(int grpId = 0; grpId < numGroups; grpId++) {
708  setSTP(grpId, isSet, STP_U, STP_tau_u, STP_tau_x);
709  }
710  } else {
711  // set STDP for a given group
712  sim_with_stp |= isSet;
713  groupConfigMap[gGrpId].stpConfig.WithSTP = isSet;
714  groupConfigMap[gGrpId].stpConfig.STP_A = (STP_U > 0.0f) ? 1.0 / STP_U : 1.0f; // scaling factor
715  groupConfigMap[gGrpId].stpConfig.STP_U = STP_U;
716  groupConfigMap[gGrpId].stpConfig.STP_tau_u_inv = 1.0f / STP_tau_u; // facilitatory
717  groupConfigMap[gGrpId].stpConfig.STP_tau_x_inv = 1.0f / STP_tau_x; // depressive
718 
719  KERNEL_INFO("STP %s for %d (%s):\tA: %1.4f, U: %1.4f, tau_u: %4.0f, tau_x: %4.0f", isSet?"enabled":"disabled",
720  gGrpId, groupConfigMap[gGrpId].grpName.c_str(), groupConfigMap[gGrpId].stpConfig.STP_A, STP_U, STP_tau_u, STP_tau_x);
721  }
722 }
723 
724 void SNN::setWeightAndWeightChangeUpdate(UpdateInterval wtANDwtChangeUpdateInterval, bool enableWtChangeDecay, float wtChangeDecay) {
725  assert(wtChangeDecay > 0.0f && wtChangeDecay < 1.0f);
726 
727  switch (wtANDwtChangeUpdateInterval) {
728  case INTERVAL_10MS:
729  wtANDwtChangeUpdateInterval_ = 10;
730  break;
731  case INTERVAL_100MS:
732  wtANDwtChangeUpdateInterval_ = 100;
733  break;
734  case INTERVAL_1000MS:
735  default:
736  wtANDwtChangeUpdateInterval_ = 1000;
737  break;
738  }
739 
740  if (enableWtChangeDecay) {
741  // set up stdp factor according to update interval
742  switch (wtANDwtChangeUpdateInterval) {
743  case INTERVAL_10MS:
744  stdpScaleFactor_ = 0.005f;
745  break;
746  case INTERVAL_100MS:
747  stdpScaleFactor_ = 0.05f;
748  break;
749  case INTERVAL_1000MS:
750  default:
751  stdpScaleFactor_ = 0.5f;
752  break;
753  }
754  // set up weight decay
755  wtChangeDecay_ = wtChangeDecay;
756  } else {
757  stdpScaleFactor_ = 1.0f;
758  wtChangeDecay_ = 0.0f;
759  }
760 
761  KERNEL_INFO("Update weight and weight change every %d ms", wtANDwtChangeUpdateInterval_);
762  KERNEL_INFO("Weight Change Decay is %s", enableWtChangeDecay? "enabled" : "disable");
763  KERNEL_INFO("STDP scale factor = %1.3f, wtChangeDecay = %1.3f", stdpScaleFactor_, wtChangeDecay_);
764 }
765 
769 
770 // reorganize the network and do the necessary allocation
771 // of all variable for carrying out the simulation..
772 // this code is run only one time during network initialization
774  switch (snnState) {
775  case CONFIG_SNN:
776  compileSNN();
777  case COMPILED_SNN:
778  partitionSNN();
779  case PARTITIONED_SNN:
780  generateRuntimeSNN();
781  break;
782  case EXECUTABLE_SNN:
783  break;
784  default:
785  KERNEL_ERROR("Unknown SNN state");
786  break;
787  }
788 }
789 
793 
// Advances the simulation by _nsec seconds plus _nmsec milliseconds of simulated time.
// \param _nsec whole seconds to simulate (>= 0)
// \param _nmsec additional milliseconds to simulate (0..999)
// \param printRunSummary whether to print a per-run summary (forced off in SILENT logger mode)
// \return 0 on success
// NOTE(review): several statements in this view appear to have been dropped by the
// source extraction (empty if-bodies below); presumably the update*Monitor() calls — confirm against the repository.
int SNN::runNetwork(int _nsec, int _nmsec, bool printRunSummary) {
	assert(_nmsec >= 0 && _nmsec < 1000);
	assert(_nsec >= 0);
	int runDurationMs = _nsec*1000 + _nmsec;
	KERNEL_DEBUG("runNetwork: runDur=%dms, printRunSummary=%s", runDurationMs, printRunSummary?"y":"n");

	// setupNetwork() must have already been called
	assert(snnState == EXECUTABLE_SNN);

	// don't bother printing if logger mode is SILENT
	printRunSummary = (loggerMode_==SILENT) ? false : printRunSummary;

	// first-time run: inform the user the simulation is running now
	if (simTime==0 && printRunSummary) {
		KERNEL_INFO("");
		KERNEL_INFO("******************** Running the simulation on %d GPU(s) and %d CPU(s) ***************************", numGPUs, numCores);
		KERNEL_INFO("");
	}

	// reset all spike counters
	resetSpikeCnt(ALL);

	// store current start time for future reference
	simTimeRunStart = simTime;
	simTimeRunStop = simTime + runDurationMs;
	assert(simTimeRunStop >= simTimeRunStart); // check for arithmetic underflow

	// ConnectionMonitor is a special case: we might want the first snapshot at t=0 in the binary
	// but updateTime() is false for simTime==0.
	// And we cannot put this code in ConnectionMonitorCore::init, because then the user would have no
	// way to call ConnectionMonitor::setUpdateTimeIntervalSec before...
	if (simTime == 0 && numConnectionMonitor) {
		// NOTE(review): body elided in this view — presumably takes the t=0 weight snapshot; confirm
	}

	// set the Poisson generation time slice to be at the run duration up to MAX_TIME_SLICE
	setGrpTimeSlice(ALL, std::max(1, std::min(runDurationMs, MAX_TIME_SLICE)));

#ifndef __NO_CUDA__
	CUDA_RESET_TIMER(timer);
	CUDA_START_TIMER(timer);
#endif

	//KERNEL_INFO("Reached the advSimStep loop!");

	// if nsec=0, simTimeMs=10, we need to run the simulator for 10 timeStep;
	// if nsec=1, simTimeMs=10, we need to run the simulator for 1*1000+10, time Step;
	for(int i = 0; i < runDurationMs; i++) {
		// advance the network by one 1-ms time step
		advSimStep();
		//KERNEL_INFO("Executed an advSimStep!");

		// update weight every updateInterval ms if plastic synapses present
		if (!sim_with_fixedwts && wtANDwtChangeUpdateInterval_ == ++wtANDwtChangeUpdateIntervalCnt_) {
			wtANDwtChangeUpdateIntervalCnt_ = 0; // reset counter
			if (!sim_in_testing) {
				// keep this if statement separate from the above, so that the counter is updated correctly
				updateWeights();
			}
		}

		// Note: updateTime() advance simTime, simTimeMs, and simTimeSec accordingly
		if (updateTime()) {
			// finished one sec of simulation...
			// NOTE(review): the four if-bodies below are empty in this view —
			// presumably per-second monitor updates; confirm against the repository
			if (numSpikeMonitor) {
			}
			if (numGroupMonitor) {
			}
			if (numConnectionMonitor) {
			}
			if (numNeuronMonitor) {
			}

			// rotate the spike tables so the next second starts fresh
			shiftSpikeTables();
		}

		fetchNeuronSpikeCount(ALL);
	}

	//KERNEL_INFO("Updated monitors!");

	// user can opt to display some runNetwork summary
	if (printRunSummary) {

		// if there are Monitors available and it's time to show the log, print status for each group
		if (numSpikeMonitor) {
			printStatusSpikeMonitor(ALL);
		}
		if (numConnectionMonitor) {
			printStatusConnectionMonitor(ALL);
		}
		if (numGroupMonitor) {
			printStatusGroupMonitor(ALL);
		}

		// record time of run summary print
		simTimeLastRunSummary = simTime;
	}

	// call updateSpike(Group)Monitor again to fetch all the left-over spikes and group status (neuromodulator)
	// NOTE(review): the corresponding calls appear elided in this view — confirm

	// keep track of simulation time...
#ifndef __NO_CUDA__
	CUDA_STOP_TIMER(timer);
	lastExecutionTime = CUDA_GET_TIMER_VALUE(timer);
	cumExecutionTime += lastExecutionTime;
#endif
	return 0;
}
908 
909 
910 
914 
// adds a bias to every weight in the connection
// \param connId connection ID whose weights to shift
// \param bias additive offset applied to each weight of the connection
// \param updateWeightRange if true, grow the allowed [0,maxWt] range to fit the new
//        weight; if false, clamp the new weight into the existing [0,maxWt] range
void SNN::biasWeights(short int connId, float bias, bool updateWeightRange) {
	assert(connId>=0 && connId<numConnections);

	// locate the runtime partition (GPU or CPU) that owns the destination group
	int netId = groupConfigMDMap[connectConfigMap[connId].grpDest].netId;
	int lGrpId = groupConfigMDMap[connectConfigMap[connId].grpDest].lGrpId;

	// pull current synapse state from the runtime into managerRuntimeData before editing
	fetchPreConnectionInfo(netId);
	fetchConnIdsLookupArray(netId);
	fetchSynapseState(netId);
	// iterate over all postsynaptic neurons
	for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++) {
		// start of this neuron's presynaptic block in the flat synapse arrays
		unsigned int cumIdx = managerRuntimeData.cumulativePre[lNId];

		// iterate over all presynaptic neurons
		unsigned int pos_ij = cumIdx;
		for (int j = 0; j < managerRuntimeData.Npre[lNId]; pos_ij++, j++) {
			// only touch synapses that belong to the requested connection
			if (managerRuntimeData.connIdsPreIdx[pos_ij] == connId) {
				// apply bias to weight
				float weight = managerRuntimeData.wt[pos_ij] + bias;

				// inform user of action taken if weight is out of bounds
//				bool needToPrintDebug = (weight+bias>connInfo->maxWt || weight+bias<connInfo->minWt);
				bool needToPrintDebug = (weight > connectConfigMap[connId].maxWt || weight < 0.0f);

				if (updateWeightRange) {
					// if this flag is set, we need to update minWt,maxWt accordingly
					// will be saving new maxSynWt and copying to GPU below
//					connInfo->minWt = fmin(connInfo->minWt, weight);
					connectConfigMap[connId].maxWt = std::max(connectConfigMap[connId].maxWt, weight);
					if (needToPrintDebug) {
						KERNEL_DEBUG("biasWeights(%d,%f,%s): updated weight ranges to [%f,%f]", connId, bias,
							(updateWeightRange?"true":"false"), 0.0f, connectConfigMap[connId].maxWt);
					}
				} else {
					// constrain weight to boundary values
					// compared to above, we swap minWt/maxWt logic
					weight = std::min(weight, connectConfigMap[connId].maxWt);
//					weight = fmax(weight, connInfo->minWt);
					weight = std::max(weight, 0.0f);
					if (needToPrintDebug) {
						KERNEL_DEBUG("biasWeights(%d,%f,%s): constrained weight %f to [%f,%f]", connId, bias,
							(updateWeightRange?"true":"false"), weight, 0.0f, connectConfigMap[connId].maxWt);
					}
				}

				// update datastructures
				managerRuntimeData.wt[pos_ij] = weight;
				managerRuntimeData.maxSynWt[pos_ij] = connectConfigMap[connId].maxWt; // it's easier to just update, even if it hasn't changed
			}
		}

		// update GPU datastructures in batches, grouped by post-neuron
		if (netId < CPU_RUNTIME_BASE) {
#ifndef __NO_CUDA__
			// push this neuron's whole presynaptic weight block back to the device
			CUDA_CHECK_ERRORS( cudaMemcpy(&(runtimeData[netId].wt[cumIdx]), &(managerRuntimeData.wt[cumIdx]), sizeof(float)*managerRuntimeData.Npre[lNId],
				cudaMemcpyHostToDevice) );

			if (runtimeData[netId].maxSynWt != NULL) {
				// only copy maxSynWt if datastructure actually exists on the GPU runtime
				// (that logic should be done elsewhere though)
				CUDA_CHECK_ERRORS( cudaMemcpy(&(runtimeData[netId].maxSynWt[cumIdx]), &(managerRuntimeData.maxSynWt[cumIdx]),
					sizeof(float) * managerRuntimeData.Npre[lNId], cudaMemcpyHostToDevice) );
			}
#else
			// GPU partition requested but built without CUDA: should be unreachable
			assert(false);
#endif
		} else {
			memcpy(&runtimeData[netId].wt[cumIdx], &managerRuntimeData.wt[cumIdx], sizeof(float) * managerRuntimeData.Npre[lNId]);

			if (runtimeData[netId].maxSynWt != NULL) {
				// only copy maxSynWt if datastructure actually exists on the CPU runtime
				// (that logic should be done elsewhere though)
				memcpy(&runtimeData[netId].maxSynWt[cumIdx], &managerRuntimeData.maxSynWt[cumIdx], sizeof(float) * managerRuntimeData.Npre[lNId]);
			}
		}
	}
}
993 
// deallocates dynamical structures and exits
// \param val process exit code passed through to exit()
void SNN::exitSimulation(int val) {
	deleteObjects(); // release all dynamically allocated simulator state first
	exit(val);       // terminate the process; does not return
}
999 
// reads network state from file
// NOTE: only records the file pointer here; the actual deserialization is
// presumably deferred until loadSimFID is consumed during network setup — confirm
void SNN::loadSimulation(FILE* fid) {
	loadSimFID = fid;
}
1004 
// multiplies every weight with a scaling factor
// \param connId connection ID whose weights to scale
// \param scale non-negative multiplicative factor applied to each weight
// \param updateWeightRange if true, grow the allowed [0,maxWt] range to fit the new
//        weight; if false, clamp the new weight into the existing [0,maxWt] range
void SNN::scaleWeights(short int connId, float scale, bool updateWeightRange) {
	assert(connId>=0 && connId<numConnections);
	assert(scale>=0.0f);

	// locate the runtime partition (GPU or CPU) that owns the destination group
	int netId = groupConfigMDMap[connectConfigMap[connId].grpDest].netId;
	int lGrpId = groupConfigMDMap[connectConfigMap[connId].grpDest].lGrpId;

	// pull current synapse state from the runtime into managerRuntimeData before editing
	fetchPreConnectionInfo(netId);
	fetchConnIdsLookupArray(netId);
	fetchSynapseState(netId);

	// iterate over all postsynaptic neurons
	for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++) {
		// start of this neuron's presynaptic block in the flat synapse arrays
		unsigned int cumIdx = managerRuntimeData.cumulativePre[lNId];

		// iterate over all presynaptic neurons
		unsigned int pos_ij = cumIdx;
		for (int j = 0; j < managerRuntimeData.Npre[lNId]; pos_ij++, j++) {
			// only touch synapses that belong to the requested connection
			if (managerRuntimeData.connIdsPreIdx[pos_ij]==connId) {
				// apply scaling factor to weight
				float weight = managerRuntimeData.wt[pos_ij] * scale;

				// inform user of action taken if weight is out of bounds
//				bool needToPrintDebug = (weight>connInfo->maxWt || weight<connInfo->minWt);
				bool needToPrintDebug = (weight > connectConfigMap[connId].maxWt || weight < 0.0f);

				if (updateWeightRange) {
					// if this flag is set, we need to update minWt,maxWt accordingly
					// will be saving new maxSynWt and copying to GPU below
//					connInfo->minWt = fmin(connInfo->minWt, weight);
					connectConfigMap[connId].maxWt = std::max(connectConfigMap[connId].maxWt, weight);
					if (needToPrintDebug) {
						KERNEL_DEBUG("scaleWeights(%d,%f,%s): updated weight ranges to [%f,%f]", connId, scale,
							(updateWeightRange?"true":"false"), 0.0f, connectConfigMap[connId].maxWt);
					}
				} else {
					// constrain weight to boundary values
					// compared to above, we swap minWt/maxWt logic
					weight = std::min(weight, connectConfigMap[connId].maxWt);
//					weight = fmax(weight, connInfo->minWt);
					weight = std::max(weight, 0.0f);
					if (needToPrintDebug) {
						KERNEL_DEBUG("scaleWeights(%d,%f,%s): constrained weight %f to [%f,%f]", connId, scale,
							(updateWeightRange?"true":"false"), weight, 0.0f, connectConfigMap[connId].maxWt);
					}
				}

				// update datastructures
				managerRuntimeData.wt[pos_ij] = weight;
				managerRuntimeData.maxSynWt[pos_ij] = connectConfigMap[connId].maxWt; // it's easier to just update, even if it hasn't changed
			}
		}

		// update GPU datastructures in batches, grouped by post-neuron
		if (netId < CPU_RUNTIME_BASE) {
#ifndef __NO_CUDA__
			// push this neuron's whole presynaptic weight block back to the device
			CUDA_CHECK_ERRORS(cudaMemcpy(&runtimeData[netId].wt[cumIdx], &managerRuntimeData.wt[cumIdx], sizeof(float)*managerRuntimeData.Npre[lNId],
				cudaMemcpyHostToDevice));

			if (runtimeData[netId].maxSynWt != NULL) {
				// only copy maxSynWt if datastructure actually exists on the GPU runtime
				// (that logic should be done elsewhere though)
				CUDA_CHECK_ERRORS(cudaMemcpy(&runtimeData[netId].maxSynWt[cumIdx], &managerRuntimeData.maxSynWt[cumIdx],
					sizeof(float) * managerRuntimeData.Npre[lNId], cudaMemcpyHostToDevice));
			}
#else
			// GPU partition requested but built without CUDA: should be unreachable
			assert(false);
#endif
		} else {
			memcpy(&runtimeData[netId].wt[cumIdx], &managerRuntimeData.wt[cumIdx], sizeof(float) * managerRuntimeData.Npre[lNId]);

			if (runtimeData[netId].maxSynWt != NULL) {
				// only copy maxSynWt if datastructure actually exists on the CPU runtime
				// (that logic should be done elsewhere though)
				memcpy(&runtimeData[netId].maxSynWt[cumIdx], &managerRuntimeData.maxSynWt[cumIdx], sizeof(float) * managerRuntimeData.Npre[lNId]);
			}
		}
	}
}
1085 
1086 // FIXME: distinguish the function call at CONFIG_STATE and SETUP_STATE, where groupConfigs[0][] might not be available
1087 // or groupConfigMap is not sync with groupConfigs[0][]
1088 GroupMonitor* SNN::setGroupMonitor(int gGrpId, FILE* fid) {
1089  int netId = groupConfigMDMap[gGrpId].netId;
1090  int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
1091 
1092  // check whether group already has a GroupMonitor
1093  if (groupConfigMDMap[gGrpId].groupMonitorId >= 0) {
1094  KERNEL_ERROR("setGroupMonitor has already been called on Group %d (%s).", gGrpId, groupConfigMap[gGrpId].grpName.c_str());
1095  exitSimulation(1);
1096  }
1097 
1098  // create new GroupMonitorCore object in any case and initialize analysis components
1099  // grpMonObj destructor (see below) will deallocate it
1100  GroupMonitorCore* grpMonCoreObj = new GroupMonitorCore(this, numGroupMonitor, gGrpId);
1101  groupMonCoreList[numGroupMonitor] = grpMonCoreObj;
1102 
1103  // assign group status file ID if we selected to write to a file, else it's NULL
1104  // if file pointer exists, it has already been fopened
1105  // this will also write the header section of the group status file
1106  // grpMonCoreObj destructor will fclose it
1107  grpMonCoreObj->setGroupFileId(fid);
1108 
1109  // create a new GroupMonitor object for the user-interface
1110  // SNN::deleteObjects will deallocate it
1111  GroupMonitor* grpMonObj = new GroupMonitor(grpMonCoreObj);
1112  groupMonList[numGroupMonitor] = grpMonObj;
1113 
1114  // also inform the group that it is being monitored...
1115  groupConfigMDMap[gGrpId].groupMonitorId = numGroupMonitor;
1116 
1117  numGroupMonitor++;
1118  KERNEL_INFO("GroupMonitor set for group %d (%s)", gGrpId, groupConfigMap[gGrpId].grpName.c_str());
1119 
1120  return grpMonObj;
1121 }
1122 
1123 // FIXME: distinguish the function call at CONFIG_STATE and SETUP_STATE, where group(connect)Config[] might not be available
1124 // or group(connect)ConfigMap is not sync with group(connect)Config[]
1125 ConnectionMonitor* SNN::setConnectionMonitor(int grpIdPre, int grpIdPost, FILE* fid) {
1126  // find connection based on pre-post pair
1127  short int connId = getConnectId(grpIdPre, grpIdPost);
1128  if (connId<0) {
1129  KERNEL_ERROR("No connection found from group %d(%s) to group %d(%s)", grpIdPre, getGroupName(grpIdPre).c_str(),
1130  grpIdPost, getGroupName(grpIdPost).c_str());
1131  exitSimulation(1);
1132  }
1133 
1134  // check whether connection already has a connection monitor
1135  if (connectConfigMap[connId].connectionMonitorId >= 0) {
1136  KERNEL_ERROR("setConnectionMonitor has already been called on Connection %d (MonitorId=%d)", connId, connectConfigMap[connId].connectionMonitorId);
1137  exitSimulation(1);
1138  }
1139 
1140  // inform the connection that it is being monitored...
1141  // this needs to be called before new ConnectionMonitorCore
1142  connectConfigMap[connId].connectionMonitorId = numConnectionMonitor;
1143 
1144  // create new ConnectionMonitorCore object in any case and initialize
1145  // connMonObj destructor (see below) will deallocate it
1146  ConnectionMonitorCore* connMonCoreObj = new ConnectionMonitorCore(this, numConnectionMonitor, connId,
1147  grpIdPre, grpIdPost);
1148  connMonCoreList[numConnectionMonitor] = connMonCoreObj;
1149 
1150  // assign conn file ID if we selected to write to a file, else it's NULL
1151  // if file pointer exists, it has already been fopened
1152  // this will also write the header section of the conn file
1153  // connMonCoreObj destructor will fclose it
1154  connMonCoreObj->setConnectFileId(fid);
1155 
1156  // create a new ConnectionMonitor object for the user-interface
1157  // SNN::deleteObjects will deallocate it
1158  ConnectionMonitor* connMonObj = new ConnectionMonitor(connMonCoreObj);
1159  connMonList[numConnectionMonitor] = connMonObj;
1160 
1161  // now init core object (depends on several datastructures allocated above)
1162  connMonCoreObj->init();
1163 
1164  numConnectionMonitor++;
1165  KERNEL_INFO("ConnectionMonitor %d set for Connection %d: %d(%s) => %d(%s)", connectConfigMap[connId].connectionMonitorId, connId, grpIdPre, getGroupName(grpIdPre).c_str(),
1166  grpIdPost, getGroupName(grpIdPost).c_str());
1167 
1168  return connMonObj;
1169 }
1170 
// FIXME: distinguish the function call at CONFIG_STATE and SETUP_STATE, where groupConfigs[0][] might not be available
// or groupConfigMap is not sync with groupConfigs[0][]
// sets up a spike generator
// \param gGrpId global group ID; must be a spike-generator group
// \param spikeGenFunc user-supplied callback object that provides spike times; must be non-NULL
void SNN::setSpikeGenerator(int gGrpId, SpikeGeneratorCore* spikeGenFunc) {
	assert(snnState == CONFIG_SNN); // must be called before setupNetwork() to work on GPU
	assert(spikeGenFunc); // NULL callback is not allowed
	assert(groupConfigMap[gGrpId].isSpikeGenerator); // only generator groups can take a callback
	groupConfigMap[gGrpId].spikeGenFunc = spikeGenFunc;
}
1180 
1181 // record spike information, return a SpikeInfo object
1182 SpikeMonitor* SNN::setSpikeMonitor(int gGrpId, FILE* fid) {
1183  // check whether group already has a SpikeMonitor
1184  if (groupConfigMDMap[gGrpId].spikeMonitorId >= 0) {
1185  // in this case, return the current object and update fid
1186  SpikeMonitor* spkMonObj = getSpikeMonitor(gGrpId);
1187 
1188  // update spike file ID
1189  SpikeMonitorCore* spkMonCoreObj = getSpikeMonitorCore(gGrpId);
1190  spkMonCoreObj->setSpikeFileId(fid);
1191 
1192  KERNEL_INFO("SpikeMonitor updated for group %d (%s)", gGrpId, groupConfigMap[gGrpId].grpName.c_str());
1193  return spkMonObj;
1194  } else {
1195  // create new SpikeMonitorCore object in any case and initialize analysis components
1196  // spkMonObj destructor (see below) will deallocate it
1197  SpikeMonitorCore* spkMonCoreObj = new SpikeMonitorCore(this, numSpikeMonitor, gGrpId);
1198  spikeMonCoreList[numSpikeMonitor] = spkMonCoreObj;
1199 
1200  // assign spike file ID if we selected to write to a file, else it's NULL
1201  // if file pointer exists, it has already been fopened
1202  // this will also write the header section of the spike file
1203  // spkMonCoreObj destructor will fclose it
1204  spkMonCoreObj->setSpikeFileId(fid);
1205 
1206  // create a new SpikeMonitor object for the user-interface
1207  // SNN::deleteObjects will deallocate it
1208  SpikeMonitor* spkMonObj = new SpikeMonitor(spkMonCoreObj);
1209  spikeMonList[numSpikeMonitor] = spkMonObj;
1210 
1211  // also inform the grp that it is being monitored...
1212  groupConfigMDMap[gGrpId].spikeMonitorId = numSpikeMonitor;
1213 
1214  numSpikeMonitor++;
1215  KERNEL_INFO("SpikeMonitor set for group %d (%s)", gGrpId, groupConfigMap[gGrpId].grpName.c_str());
1216 
1217  return spkMonObj;
1218  }
1219 }
1220 
1221 // record neuron state information, return a NeuronInfo object
1222 NeuronMonitor* SNN::setNeuronMonitor(int gGrpId, FILE* fid) {
1223  int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
1224  int netId = groupConfigMDMap[gGrpId].netId;
1225 
1226  if (getGroupNumNeurons(gGrpId) > 128) {
1227  KERNEL_WARN("Due to limited memory space, only the first 128 neurons can be monitored by NeuronMonitor");
1228  }
1229 
1230  // check whether group already has a NeuronMonitor
1231  if (groupConfigMDMap[gGrpId].neuronMonitorId >= 0) {
1232  // in this case, return the current object and update fid
1233  NeuronMonitor* nrnMonObj = getNeuronMonitor(gGrpId);
1234 
1235  // update neuron file ID
1236  NeuronMonitorCore* nrnMonCoreObj = getNeuronMonitorCore(gGrpId);
1237  nrnMonCoreObj->setNeuronFileId(fid);
1238 
1239  KERNEL_INFO("NeuronMonitor updated for group %d (%s)", gGrpId, groupConfigMap[gGrpId].grpName.c_str());
1240  return nrnMonObj;
1241  } else {
1242  // create new NeuronMonitorCore object in any case and initialize analysis components
1243  // nrnMonObj destructor (see below) will deallocate it
1244  NeuronMonitorCore* nrnMonCoreObj = new NeuronMonitorCore(this, numNeuronMonitor, gGrpId);
1245  neuronMonCoreList[numNeuronMonitor] = nrnMonCoreObj;
1246 
1247  // assign neuron state file ID if we selected to write to a file, else it's NULL
1248  // if file pointer exists, it has already been fopened
1249  // this will also write the header section of the spike file
1250  // spkMonCoreObj destructor will fclose it
1251  nrnMonCoreObj->setNeuronFileId(fid);
1252 
1253  // create a new NeuronMonitor object for the user-interface
1254  // SNN::deleteObjects will deallocate it
1255  NeuronMonitor* nrnMonObj = new NeuronMonitor(nrnMonCoreObj);
1256  neuronMonList[numNeuronMonitor] = nrnMonObj;
1257 
1258  // also inform the grp that it is being monitored...
1259  groupConfigMDMap[gGrpId].neuronMonitorId = numNeuronMonitor;
1260 
1261  numNeuronMonitor++;
1262  KERNEL_INFO("NeuronMonitor set for group %d (%s)", gGrpId, groupConfigMap[gGrpId].grpName.c_str());
1263 
1264  return nrnMonObj;
1265  }
1266 }
1267 
// FIXME: distinguish the function call at CONFIG_STATE and RUN_STATE, where groupConfigs[0][] might not be available
// or groupConfigMap is not sync with groupConfigs[0][]
// assigns spike rate to group
// \param gGrpId global group ID; must be a spike-generator group
// \param ratePtr per-neuron mean firing rates (one entry per neuron in the group)
// \param refPeriod refractory period, >= 1
void SNN::setSpikeRate(int gGrpId, PoissonRate* ratePtr, int refPeriod) {
	int netId = groupConfigMDMap[gGrpId].netId;
	int lGrpId = groupConfigMDMap[gGrpId].lGrpId;

	assert(gGrpId >= 0 && lGrpId < networkConfigs[netId].numGroups);
	assert(ratePtr);
	assert(groupConfigMap[gGrpId].isSpikeGenerator);
	assert(ratePtr->getNumNeurons() == groupConfigMap[gGrpId].numN); // one rate per neuron
	assert(refPeriod >= 1);

	groupConfigMDMap[gGrpId].ratePtr = ratePtr;
	groupConfigMDMap[gGrpId].refractPeriod = refPeriod;
	spikeRateUpdated = true; // flag so the runtime picks up the new rates
}
1285 
1286 // sets the weight value of a specific synapse
1287 void SNN::setWeight(short int connId, int neurIdPre, int neurIdPost, float weight, bool updateWeightRange) {
1288  assert(connId>=0 && connId<getNumConnections());
1289  assert(weight>=0.0f);
1290 
1291  assert(neurIdPre >= 0 && neurIdPre < getGroupNumNeurons(connectConfigMap[connId].grpSrc));
1292  assert(neurIdPost >= 0 && neurIdPost < getGroupNumNeurons(connectConfigMap[connId].grpDest));
1293 
1294  float maxWt = fabs(connectConfigMap[connId].maxWt);
1295  float minWt = 0.0f;
1296 
1297  // inform user of acton taken if weight is out of bounds
1298  bool needToPrintDebug = (weight>maxWt || weight<minWt);
1299 
1300  int netId = groupConfigMDMap[connectConfigMap[connId].grpDest].netId;
1301  int postlGrpId = groupConfigMDMap[connectConfigMap[connId].grpDest].lGrpId;
1302  int prelGrpId = groupConfigMDMap[connectConfigMap[connId].grpSrc].lGrpId;
1303 
1304  fetchPreConnectionInfo(netId);
1305  fetchConnIdsLookupArray(netId);
1306  fetchSynapseState(netId);
1307 
1308  if (updateWeightRange) {
1309  // if this flag is set, we need to update minWt,maxWt accordingly
1310  // will be saving new maxSynWt and copying to GPU below
1311 // connInfo->minWt = fmin(connInfo->minWt, weight);
1312  maxWt = fmax(maxWt, weight);
1313  if (needToPrintDebug) {
1314  KERNEL_DEBUG("setWeight(%d,%d,%d,%f,%s): updated weight ranges to [%f,%f]", connId, neurIdPre, neurIdPost,
1315  weight, (updateWeightRange?"true":"false"), minWt, maxWt);
1316  }
1317  } else {
1318  // constrain weight to boundary values
1319  // compared to above, we swap minWt/maxWt logic
1320  weight = fmin(weight, maxWt);
1321  weight = fmax(weight, minWt);
1322  if (needToPrintDebug) {
1323  KERNEL_DEBUG("setWeight(%d,%d,%d,%f,%s): constrained weight %f to [%f,%f]", connId, neurIdPre, neurIdPost,
1324  weight, (updateWeightRange?"true":"false"), weight, minWt, maxWt);
1325  }
1326  }
1327 
1328  // find real ID of pre- and post-neuron
1329  int neurIdPreReal = groupConfigs[netId][prelGrpId].lStartN + neurIdPre;
1330  int neurIdPostReal = groupConfigs[netId][postlGrpId].lStartN + neurIdPost;
1331 
1332  // iterate over all presynaptic synapses until right one is found
1333  bool synapseFound = false;
1334  int pos_ij = managerRuntimeData.cumulativePre[neurIdPostReal];
1335  for (int j = 0; j < managerRuntimeData.Npre[neurIdPostReal]; pos_ij++, j++) {
1336  SynInfo* preId = &(managerRuntimeData.preSynapticIds[pos_ij]);
1337  int pre_nid = GET_CONN_NEURON_ID((*preId));
1338  if (GET_CONN_NEURON_ID((*preId)) == neurIdPreReal) {
1339  assert(managerRuntimeData.connIdsPreIdx[pos_ij] == connId); // make sure we've got the right connection ID
1340 
1341  managerRuntimeData.wt[pos_ij] = isExcitatoryGroup(connectConfigMap[connId].grpSrc) ? weight : -1.0 * weight;
1342  managerRuntimeData.maxSynWt[pos_ij] = isExcitatoryGroup(connectConfigMap[connId].grpSrc) ? maxWt : -1.0 * maxWt;
1343 
1344  if (netId < CPU_RUNTIME_BASE) {
1345 #ifndef __NO_CUDA__
1346  // need to update datastructures on GPU runtime
1347  CUDA_CHECK_ERRORS(cudaMemcpy(&runtimeData[netId].wt[pos_ij], &managerRuntimeData.wt[pos_ij], sizeof(float), cudaMemcpyHostToDevice));
1348  if (runtimeData[netId].maxSynWt != NULL) {
1349  // only copy maxSynWt if datastructure actually exists on the GPU runtime
1350  // (that logic should be done elsewhere though)
1351  CUDA_CHECK_ERRORS(cudaMemcpy(&runtimeData[netId].maxSynWt[pos_ij], &managerRuntimeData.maxSynWt[pos_ij], sizeof(float), cudaMemcpyHostToDevice));
1352  }
1353 #else
1354  assert(false);
1355 #endif
1356  } else {
1357  // need to update datastructures on CPU runtime
1358  memcpy(&runtimeData[netId].wt[pos_ij], &managerRuntimeData.wt[pos_ij], sizeof(float));
1359  if (runtimeData[netId].maxSynWt != NULL) {
1360  // only copy maxSynWt if datastructure actually exists on the CPU runtime
1361  // (that logic should be done elsewhere though)
1362  memcpy(&runtimeData[netId].maxSynWt[pos_ij], &managerRuntimeData.maxSynWt[pos_ij], sizeof(float));
1363  }
1364  }
1365 
1366  // synapse found and updated: we're done!
1367  synapseFound = true;
1368  break;
1369  }
1370  }
1371 
1372  if (!synapseFound) {
1373  KERNEL_WARN("setWeight(%d,%d,%d,%f,%s): Synapse does not exist, not updated.", connId, neurIdPre, neurIdPost,
1374  weight, (updateWeightRange?"true":"false"));
1375  }
1376 }
1377 
// Injects an external current into every neuron of a (non-Poisson) group.
// \param grpId global group ID
// \param current one value per neuron, in the group's neuron order
void SNN::setExternalCurrent(int grpId, const std::vector<float>& current) {
	assert(grpId >= 0); assert(grpId < numGroups);
	assert(!isPoissonGroup(grpId)); // current injection only applies to point-neuron groups
	assert(current.size() == getGroupNumNeurons(grpId)); // exactly one value per neuron

	// locate the runtime partition (GPU or CPU) that owns the group
	int netId = groupConfigMDMap[grpId].netId;
	int lGrpId = groupConfigMDMap[grpId].lGrpId;

	// // update flag for faster handling at run-time
	// if (count_if(current.begin(), current.end(), isGreaterThanZero)) {
	// 	groupConfigs[0][grpId].WithCurrentInjection = true;
	// } else {
	// 	groupConfigs[0][grpId].WithCurrentInjection = false;
	// }

	// store external current in array; lNId walks the group's local neuron IDs,
	// j walks the user-supplied vector in lockstep
	for (int lNId = groupConfigs[netId][lGrpId].lStartN, j = 0; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++, j++) {
		managerRuntimeData.extCurrent[lNId] = current[j];
	}

	// copy to GPU if necessary
	// don't allocate; allocation done in generateRuntimeData
	if (netId < CPU_RUNTIME_BASE) {
		copyExternalCurrent(netId, lGrpId, &runtimeData[netId], cudaMemcpyHostToDevice, false);
	}
	else {
		copyExternalCurrent(netId, lGrpId, &runtimeData[netId], false);
	}
}
1407 
1408 // writes network state to file
1409 // handling of file pointer should be handled externally: as far as this function is concerned, it is simply
1410 // trying to write to file
1411 void SNN::saveSimulation(FILE* fid, bool saveSynapseInfo) {
1412  int tmpInt;
1413  float tmpFloat;
1414 
1416 
1418  tmpInt = 294338571; // some int used to identify saveSimulation files
1419  if (!fwrite(&tmpInt,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1420 
1422  tmpFloat = 0.3f;
1423  if (!fwrite(&tmpFloat,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1424 
1426  tmpFloat = ((float)simTimeSec) + ((float)simTimeMs)/1000.0f;
1427  if (!fwrite(&tmpFloat,sizeof(float),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1428 
1430  stopTiming();
1431  tmpFloat = executionTime/1000.0f;
1432  if (!fwrite(&tmpFloat,sizeof(float),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1433 
1435 
1437  if (!fwrite(&glbNetworkConfig.numN,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1438  int dummyInt = 0;
1439  //if (!fwrite(&numPreSynNet,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1440  // if (!fwrite(&dummyInt,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1441  //if (!fwrite(&numPostSynNet,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1442  // if (!fwrite(&dummyInt,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1443  if (!fwrite(&numGroups,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1444 
1446  char name[100];
1447  for (int gGrpId=0;gGrpId<numGroups;gGrpId++) {
1448  if (!fwrite(&groupConfigMDMap[gGrpId].gStartN,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1449  if (!fwrite(&groupConfigMDMap[gGrpId].gEndN,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1450 
1451  if (!fwrite(&groupConfigMap[gGrpId].grid.numX,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1452  if (!fwrite(&groupConfigMap[gGrpId].grid.numY,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1453  if (!fwrite(&groupConfigMap[gGrpId].grid.numZ,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1454 
1455  strncpy(name,groupConfigMap[gGrpId].grpName.c_str(),100);
1456  if (!fwrite(name,1,100,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1457  }
1458 
1459  if (!saveSynapseInfo) return;
1460 
1461  // Save number of local networks
1462  int net_count = 0;
1463  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
1464  if (!groupPartitionLists[netId].empty()) {
1465  net_count++;
1466  }
1467  }
1468  if (!fwrite(&net_count, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1469 
1470  // Save weights for each local network
1471  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
1472  if (!groupPartitionLists[netId].empty()) {
1473  // copy from runtimeData to managerRuntimeData
1474  fetchPreConnectionInfo(netId);
1475  fetchPostConnectionInfo(netId);
1476  fetchConnIdsLookupArray(netId);
1477  fetchSynapseState(netId);
1478 
1479  // save number of synapses that starting from local groups
1480  int numSynToSave = 0;
1481  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
1482  if (grpIt->netId == netId) {
1483  numSynToSave += grpIt->numPostSynapses;
1484  }
1485  }
1486  if (!fwrite(&numSynToSave, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1487  // read synapse info from managerRuntimData
1488  int numSynSaved = 0;
1489  for (int lNId = 0; lNId < networkConfigs[netId].numNAssigned; lNId++) {
1490  unsigned int offset = managerRuntimeData.cumulativePost[lNId];
1491 
1492  // save each synapse starting from from neuron lNId
1493  for (int t = 0; t < glbNetworkConfig.maxDelay; t++) {
1494  DelayInfo dPar = managerRuntimeData.postDelayInfo[lNId*(glbNetworkConfig.maxDelay + 1)+t];
1495 
1496  for (int idx_d=dPar.delay_index_start; idx_d < (dPar.delay_index_start + dPar.delay_length); idx_d++) {
1497  SynInfo post_info = managerRuntimeData.postSynapticIds[offset + idx_d];
1498  int lNIdPost = GET_CONN_NEURON_ID(post_info);
1499  int lGrpIdPost = GET_CONN_GRP_ID(post_info);
1500  int preSynId = GET_CONN_SYN_ID(post_info);
1501  int pre_pos = managerRuntimeData.cumulativePre[lNIdPost] + preSynId;
1502  SynInfo pre_info = managerRuntimeData.preSynapticIds[pre_pos];
1503  int lNIdPre = GET_CONN_NEURON_ID(pre_info);
1504  int lGrpIdPre = GET_CONN_GRP_ID(pre_info);
1505  float weight = managerRuntimeData.wt[pre_pos];
1506  float maxWeight = managerRuntimeData.maxSynWt[pre_pos];
1507  int connId = managerRuntimeData.connIdsPreIdx[pre_pos];
1508  int delay = t+1;
1509 
1510  // convert local group id to global group id
1511  // convert local neuron id to neuron order in group
1512  int gGrpIdPre = groupConfigs[netId][lGrpIdPre].gGrpId;
1513  int gGrpIdPost = groupConfigs[netId][lGrpIdPost].gGrpId;
1514  int grpNIdPre = lNId - groupConfigs[netId][lGrpIdPre].lStartN;
1515  int grpNIdPost = lNIdPost - groupConfigs[netId][lGrpIdPost].lStartN;
1516 
1517  // we only save synapses starting from local groups since otherwise we will save external synapses twice
1518  // write order is based on function connectNeurons (no NetId & external_NetId)
1519  // inline void SNN::connectNeurons(int netId, int _grpSrc, int _grpDest, int _nSrc, int _nDest, short int _connId, float initWt, float maxWt, uint8_t delay, int externalNetId)
1520  if (groupConfigMDMap[gGrpIdPre].netId == netId) {
1521  numSynSaved++;
1522  if (!fwrite(&gGrpIdPre, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1523  if (!fwrite(&gGrpIdPost, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1524  if (!fwrite(&grpNIdPre, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1525  if (!fwrite(&grpNIdPost, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1526  if (!fwrite(&connId, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1527  if (!fwrite(&weight, sizeof(float), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1528  if (!fwrite(&maxWeight, sizeof(float), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1529  if (!fwrite(&delay, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1530  }
1531  }
1532  }
1533  }
1534  assert(numSynSaved == numSynToSave);
1535  }
1536  }
1537 
1538 
1540  //if (simMode_ == GPU_MODE)
1541  // copyWeightState(&managerRuntimeData, &runtimeData[0], cudaMemcpyDeviceToHost, false);
1543 
1545  //if (saveSynapseInfo) {
1546  // for (int i = 0; i < numN; i++) {
1547  // unsigned int offset = managerRuntimeData.cumulativePost[i];
1548 
1549  // unsigned int count = 0;
1550  // for (int t=0;t<maxDelay_;t++) {
1551  // DelayInfo dPar = managerRuntimeData.postDelayInfo[i*(maxDelay_+1)+t];
1552 
1553  // for(int idx_d=dPar.delay_index_start; idx_d<(dPar.delay_index_start+dPar.delay_length); idx_d++)
1554  // count++;
1555  // }
1556 
1557  // if (!fwrite(&count,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1558 
1559  // for (int t=0;t<maxDelay_;t++) {
1560  // DelayInfo dPar = managerRuntimeData.postDelayInfo[i*(maxDelay_+1)+t];
1561 
1562  // for(int idx_d=dPar.delay_index_start; idx_d<(dPar.delay_index_start+dPar.delay_length); idx_d++) {
1563  // // get synaptic info...
1564  // SynInfo post_info = managerRuntimeData.postSynapticIds[offset + idx_d];
1565 
1566  // // get neuron id
1567  // //int p_i = (post_info&POST_SYN_NEURON_MASK);
1568  // unsigned int p_i = GET_CONN_NEURON_ID(post_info);
1569  // assert(p_i<numN);
1570 
1571  // // get syn id
1572  // unsigned int s_i = GET_CONN_SYN_ID(post_info);
1573  // //>>POST_SYN_NEURON_BITS)&POST_SYN_CONN_MASK;
1574  // assert(s_i<(managerRuntimeData.Npre[p_i]));
1575 
1576  // // get the cumulative position for quick access...
1577  // unsigned int pos_i = managerRuntimeData.cumulativePre[p_i] + s_i;
1578 
1579  // uint8_t delay = t+1;
1580  // uint8_t plastic = s_i < managerRuntimeData.Npre_plastic[p_i]; // plastic or fixed.
1581 
1582  // if (!fwrite(&i,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1583  // if (!fwrite(&p_i,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1584  // if (!fwrite(&(managerRuntimeData.wt[pos_i]),sizeof(float),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1585  // if (!fwrite(&(managerRuntimeData.maxSynWt[pos_i]),sizeof(float),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1586  // if (!fwrite(&delay,sizeof(uint8_t),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1587  // if (!fwrite(&plastic,sizeof(uint8_t),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1588  // if (!fwrite(&(managerRuntimeData.connIdsPreIdx[pos_i]),sizeof(short int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1589  // }
1590  // }
1591  // }
1592  //}
1593 }
1594 
1595 // writes population weights from gIDpre to gIDpost to file fname in binary
1596 //void SNN::writePopWeights(std::string fname, int grpIdPre, int grpIdPost) {
1597 // assert(grpIdPre>=0); assert(grpIdPost>=0);
1598 //
1599 // float* weights;
1600 // int matrixSize;
1601 // FILE* fid;
1602 // int numPre, numPost;
1603 // fid = fopen(fname.c_str(), "wb");
1604 // assert(fid != NULL);
1605 //
1606 // if(snnState == CONFIG_SNN || snnState == COMPILED_SNN || snnState == PARTITIONED_SNN){
1607 // KERNEL_ERROR("Simulation has not been run yet, cannot output weights.");
1608 // exitSimulation(1);
1609 // }
1610 //
1611 // SynInfo* preId;
1612 // int pre_nid, pos_ij;
1613 //
1614 // //population sizes
1615 // numPre = groupConfigs[0][grpIdPre].SizeN;
1616 // numPost = groupConfigs[0][grpIdPost].SizeN;
1617 //
1618 // //first iteration gets the number of synaptic weights to place in our
1619 // //weight matrix.
1620 // matrixSize=0;
1621 // //iterate over all neurons in the post group
1622 // for (int i=groupConfigs[0][grpIdPost].StartN; i<=groupConfigs[0][grpIdPost].EndN; i++) {
1623 // // for every post-neuron, find all pre
1624 // pos_ij = managerRuntimeData.cumulativePre[i]; // i-th neuron, j=0th synapse
1625 // //iterate over all presynaptic synapses
1626 // for(int j=0; j<managerRuntimeData.Npre[i]; pos_ij++,j++) {
1627 // preId = &managerRuntimeData.preSynapticIds[pos_ij];
1628 // pre_nid = GET_CONN_NEURON_ID((*preId)); // neuron id of pre
1629 // if (pre_nid<groupConfigs[0][grpIdPre].StartN || pre_nid>groupConfigs[0][grpIdPre].EndN)
1630 // continue; // connection does not belong to group grpIdPre
1631 // matrixSize++;
1632 // }
1633 // }
1634 //
1635 // //now we have the correct size
1636 // weights = new float[matrixSize];
1637 // //second iteration assigns the weights
1638 // int curr = 0; // iterator for return array
1639 // //iterate over all neurons in the post group
1640 // for (int i=groupConfigs[0][grpIdPost].StartN; i<=groupConfigs[0][grpIdPost].EndN; i++) {
1641 // // for every post-neuron, find all pre
1642 // pos_ij = managerRuntimeData.cumulativePre[i]; // i-th neuron, j=0th synapse
1643 // //do the GPU copy here. Copy the current weights from GPU to CPU.
1644 // if(simMode_==GPU_MODE){
1645 // copyWeightsGPU(i,grpIdPre);
1646 // }
1647 // //iterate over all presynaptic synapses
1648 // for(int j=0; j<managerRuntimeData.Npre[i]; pos_ij++,j++) {
1649 // preId = &(managerRuntimeData.preSynapticIds[pos_ij]);
1650 // pre_nid = GET_CONN_NEURON_ID((*preId)); // neuron id of pre
1651 // if (pre_nid<groupConfigs[0][grpIdPre].StartN || pre_nid>groupConfigs[0][grpIdPre].EndN)
1652 // continue; // connection does not belong to group grpIdPre
1653 // weights[curr] = managerRuntimeData.wt[pos_ij];
1654 // curr++;
1655 // }
1656 // }
1657 //
1658 // fwrite(weights,sizeof(float),matrixSize,fid);
1659 // fclose(fid);
1660 // //Let my memory FREE!!!
1661 // delete [] weights;
1662 //}
1663 
1664 
1668 
1669 // set new file pointer for all files
1670 // fp==NULL is code for don't change it
1671 // can be called in all logger modes; however, the analogous interface function can only be called in CUSTOM
1672 void SNN::setLogsFp(FILE* fpInf, FILE* fpErr, FILE* fpDeb, FILE* fpLog) {
1673  if (fpInf!=NULL) {
1674  if (fpInf_!=NULL && fpInf_!=stdout && fpInf_!=stderr)
1675  fclose(fpInf_);
1676  fpInf_ = fpInf;
1677  }
1678 
1679  if (fpErr!=NULL) {
1680  if (fpErr_ != NULL && fpErr_!=stdout && fpErr_!=stderr)
1681  fclose(fpErr_);
1682  fpErr_ = fpErr;
1683  }
1684 
1685  if (fpDeb!=NULL) {
1686  if (fpDeb_!=NULL && fpDeb_!=stdout && fpDeb_!=stderr)
1687  fclose(fpDeb_);
1688  fpDeb_ = fpDeb;
1689  }
1690 
1691  if (fpLog!=NULL) {
1692  if (fpLog_!=NULL && fpLog_!=stdout && fpLog_!=stderr)
1693  fclose(fpLog_);
1694  fpLog_ = fpLog;
1695  }
1696 }
1697 
1698 
1702 
1703 // loop over linked list entries to find a connection with the right pre-post pair, O(N)
1704 short int SNN::getConnectId(int grpIdPre, int grpIdPost) {
1705  short int connId = -1;
1706 
1707  for (std::map<int, ConnectConfig>::iterator it = connectConfigMap.begin(); it != connectConfigMap.end(); it++) {
1708  if (it->second.grpSrc == grpIdPre && it->second.grpDest == grpIdPost) {
1709  connId = it->second.connId;
1710  break;
1711  }
1712  }
1713 
1714  return connId;
1715 }
1716 
1718  CHECK_CONNECTION_ID(connId, numConnections);
1719 
1720  if (connectConfigMap.find(connId) == connectConfigMap.end()) {
1721  KERNEL_ERROR("Total Connections = %d", numConnections);
1722  KERNEL_ERROR("ConnectId (%d) cannot be recognized", connId);
1723  }
1724 
1725  return connectConfigMap[connId];
1726 }
1727 
1728 std::vector<float> SNN::getConductanceAMPA(int gGrpId) {
1729  assert(isSimulationWithCOBA());
1730 
1731  // copy data to the manager runtime
1732  fetchConductanceAMPA(gGrpId);
1733 
1734  std::vector<float> gAMPAvec;
1735  for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
1736  gAMPAvec.push_back(managerRuntimeData.gAMPA[gNId]);
1737  }
1738  return gAMPAvec;
1739 }
1740 
1741 std::vector<float> SNN::getConductanceNMDA(int gGrpId) {
1742  assert(isSimulationWithCOBA());
1743 
1744  // copy data to the manager runtime
1745  fetchConductanceNMDA(gGrpId);
1746 
1747  std::vector<float> gNMDAvec;
1748  if (isSimulationWithNMDARise()) {
1749  // need to construct conductance from rise and decay parts
1750  for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
1751  gNMDAvec.push_back(managerRuntimeData.gNMDA_d[gNId] - managerRuntimeData.gNMDA_r[gNId]);
1752  }
1753  } else {
1754  for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
1755  gNMDAvec.push_back(managerRuntimeData.gNMDA[gNId]);
1756  }
1757  }
1758  return gNMDAvec;
1759 }
1760 
1761 std::vector<float> SNN::getConductanceGABAa(int gGrpId) {
1762  assert(isSimulationWithCOBA());
1763 
1764  // copy data to the manager runtime
1765  fetchConductanceGABAa(gGrpId);
1766 
1767  std::vector<float> gGABAaVec;
1768  for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
1769  gGABAaVec.push_back(managerRuntimeData.gGABAa[gNId]);
1770  }
1771  return gGABAaVec;
1772 }
1773 
1774 std::vector<float> SNN::getConductanceGABAb(int gGrpId) {
1775  assert(isSimulationWithCOBA());
1776 
1777  // copy data to the manager runtime
1778  fetchConductanceGABAb(gGrpId);
1779 
1780  std::vector<float> gGABAbVec;
1781  if (isSimulationWithGABAbRise()) {
1782  // need to construct conductance from rise and decay parts
1783  for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
1784  gGABAbVec.push_back(managerRuntimeData.gGABAb_d[gNId] - managerRuntimeData.gGABAb_r[gNId]);
1785  }
1786  } else {
1787  for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
1788  gGABAbVec.push_back(managerRuntimeData.gGABAb[gNId]);
1789  }
1790  }
1791  return gGABAbVec;
1792 }
1793 
1794 // returns RangeDelay struct of a connection
1795 RangeDelay SNN::getDelayRange(short int connId) {
1796  assert(connId>=0 && connId<numConnections);
1797 
1798  return RangeDelay(connectConfigMap[connId].minDelay, connectConfigMap[connId].maxDelay);
1799 }
1800 
1801 // \TODO: bad API design (return allocated memory to user), consider to move this function to connection monitor
1802 uint8_t* SNN::getDelays(int gGrpIdPre, int gGrpIdPost, int& numPreN, int& numPostN) {
1803  int netIdPost = groupConfigMDMap[gGrpIdPost].netId;
1804  int lGrpIdPost = groupConfigMDMap[gGrpIdPost].lGrpId;
1805  int lGrpIdPre = -1;
1806  uint8_t* delays;
1807 
1808  for (int lGrpId = 0; lGrpId < networkConfigs[netIdPost].numGroupsAssigned; lGrpId++)
1809  if (groupConfigs[netIdPost][lGrpId].gGrpId == gGrpIdPre) {
1810  lGrpIdPre = lGrpId;
1811  break;
1812  }
1813  assert(lGrpIdPre != -1);
1814 
1815  numPreN = groupConfigMap[gGrpIdPre].numN;
1816  numPostN = groupConfigMap[gGrpIdPost].numN;
1817 
1818  delays = new uint8_t[numPreN * numPostN];
1819  memset(delays, 0, numPreN * numPostN);
1820 
1821  fetchPostConnectionInfo(netIdPost);
1822 
1823  for (int lNIdPre = groupConfigs[netIdPost][lGrpIdPre].lStartN; lNIdPre < groupConfigs[netIdPost][lGrpIdPre].lEndN; lNIdPre++) {
1824  unsigned int offset = managerRuntimeData.cumulativePost[lNIdPre];
1825 
1826  for (int t = 0; t < glbNetworkConfig.maxDelay; t++) {
1827  DelayInfo dPar = managerRuntimeData.postDelayInfo[lNIdPre * (glbNetworkConfig.maxDelay + 1) + t];
1828 
1829  for(int idx_d = dPar.delay_index_start; idx_d<(dPar.delay_index_start+dPar.delay_length); idx_d++) {
1830  // get synaptic info...
1831  SynInfo postSynInfo = managerRuntimeData.postSynapticIds[offset + idx_d];
1832 
1833  // get local post neuron id
1834  int lNIdPost = GET_CONN_NEURON_ID(postSynInfo);
1835  assert(lNIdPost < glbNetworkConfig.numN);
1836 
1837  if (lNIdPost >= groupConfigs[netIdPost][lGrpIdPost].lStartN && lNIdPost <= groupConfigs[netIdPost][lGrpIdPost].lEndN) {
1838  delays[(lNIdPre - groupConfigs[netIdPost][lGrpIdPre].lStartN) + numPreN * (lNIdPost - groupConfigs[netIdPost][lGrpIdPost].lStartN)] = t + 1;
1839  }
1840  }
1841  }
1842  }
1843  return delays;
1844 }
1845 
1847  assert(gGrpId >= 0 && gGrpId < numGroups);
1848 
1849  return groupConfigMap[gGrpId].grid;
1850 }
1851 
1852 // find ID of group with name grpName
1853 int SNN::getGroupId(std::string grpName) {
1854  int grpId = -1;
1855  for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
1856  if (groupConfigMap[gGrpId].grpName.compare(grpName) == 0) {
1857  grpId = gGrpId;
1858  break;
1859  }
1860  }
1861 
1862  return grpId;
1863 }
1864 
1865 std::string SNN::getGroupName(int gGrpId) {
1866  assert(gGrpId >= -1 && gGrpId < numGroups);
1867 
1868  if (gGrpId == ALL)
1869  return "ALL";
1870 
1871  return groupConfigMap[gGrpId].grpName;
1872 }
1873 
1875  GroupSTDPInfo gInfo;
1876 
1877  gInfo.WithSTDP = groupConfigMap[gGrpId].stdpConfig.WithSTDP;
1878  gInfo.WithESTDP = groupConfigMap[gGrpId].stdpConfig.WithESTDP;
1879  gInfo.WithISTDP = groupConfigMap[gGrpId].stdpConfig.WithISTDP;
1880  gInfo.WithESTDPtype = groupConfigMap[gGrpId].stdpConfig.WithESTDPtype;
1881  gInfo.WithISTDPtype = groupConfigMap[gGrpId].stdpConfig.WithISTDPtype;
1882  gInfo.WithESTDPcurve = groupConfigMap[gGrpId].stdpConfig.WithESTDPcurve;
1883  gInfo.WithISTDPcurve = groupConfigMap[gGrpId].stdpConfig.WithISTDPcurve;
1884  gInfo.ALPHA_MINUS_EXC = groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_EXC;
1885  gInfo.ALPHA_PLUS_EXC = groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_EXC;
1886  gInfo.TAU_MINUS_INV_EXC = groupConfigMap[gGrpId].stdpConfig.TAU_MINUS_INV_EXC;
1887  gInfo.TAU_PLUS_INV_EXC = groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_EXC;
1888  gInfo.ALPHA_MINUS_INB = groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_INB;
1889  gInfo.ALPHA_PLUS_INB = groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_INB;
1890  gInfo.TAU_MINUS_INV_INB = groupConfigMap[gGrpId].stdpConfig.TAU_MINUS_INV_INB;
1891  gInfo.TAU_PLUS_INV_INB = groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_INB;
1892  gInfo.GAMMA = groupConfigMap[gGrpId].stdpConfig.GAMMA;
1893  gInfo.BETA_LTP = groupConfigMap[gGrpId].stdpConfig.BETA_LTP;
1894  gInfo.BETA_LTD = groupConfigMap[gGrpId].stdpConfig.BETA_LTD;
1895  gInfo.LAMBDA = groupConfigMap[gGrpId].stdpConfig.LAMBDA;
1896  gInfo.DELTA = groupConfigMap[gGrpId].stdpConfig.DELTA;
1897 
1898  return gInfo;
1899 }
1900 
1903 
1904  gInfo.baseDP = groupConfigMap[gGrpId].neuromodulatorConfig.baseDP;
1905  gInfo.base5HT = groupConfigMap[gGrpId].neuromodulatorConfig.base5HT;
1906  gInfo.baseACh = groupConfigMap[gGrpId].neuromodulatorConfig.baseACh;
1907  gInfo.baseNE = groupConfigMap[gGrpId].neuromodulatorConfig.baseNE;
1908  gInfo.decayDP = groupConfigMap[gGrpId].neuromodulatorConfig.decayDP;
1909  gInfo.decay5HT = groupConfigMap[gGrpId].neuromodulatorConfig.decay5HT;
1910  gInfo.decayACh = groupConfigMap[gGrpId].neuromodulatorConfig.decayACh;
1911  gInfo.decayNE = groupConfigMap[gGrpId].neuromodulatorConfig.decayNE;
1912 
1913  return gInfo;
1914 }
1915 
1917  int gGrpId = -1;
1918  assert(gNId >= 0 && gNId < glbNetworkConfig.numN);
1919 
1920  // search for global group id
1921  for (std::map<int, GroupConfigMD>::iterator grpIt = groupConfigMDMap.begin(); grpIt != groupConfigMDMap.end(); grpIt++) {
1922  if (gNId >= grpIt->second.gStartN && gNId <= grpIt->second.gEndN)
1923  gGrpId = grpIt->second.gGrpId;
1924  }
1925 
1926  // adjust neurId for neuron ID of first neuron in the group
1927  int neurId = gNId - groupConfigMDMap[gGrpId].gStartN;
1928 
1929  return getNeuronLocation3D(gGrpId, neurId);
1930 }
1931 
1932 Point3D SNN::getNeuronLocation3D(int gGrpId, int relNeurId) {
1933  Grid3D grid = groupConfigMap[gGrpId].grid;
1934  assert(gGrpId >= 0 && gGrpId < numGroups);
1935  assert(relNeurId >= 0 && relNeurId < getGroupNumNeurons(gGrpId));
1936 
1937  int intX = relNeurId % grid.numX;
1938  int intY = (relNeurId / grid.numX) % grid.numY;
1939  int intZ = relNeurId / (grid.numX * grid.numY);
1940 
1941  // get coordinates center around origin
1942  double coordX = grid.distX * intX + grid.offsetX;
1943  double coordY = grid.distY * intY + grid.offsetY;
1944  double coordZ = grid.distZ * intZ + grid.offsetZ;
1945  return Point3D(coordX, coordY, coordZ);
1946 }
1947 
1948 // returns the number of synaptic connections associated with this connection.
1949 int SNN::getNumSynapticConnections(short int connId) {
1950  //we didn't find the connection.
1951  if (connectConfigMap.find(connId) == connectConfigMap.end()) {
1952  KERNEL_ERROR("Connection ID was not found. Quitting.");
1953  exitSimulation(1);
1954  }
1955 
1956  return connectConfigMap[connId].numberOfConnections;
1957 }
1958 
1959 // returns pointer to existing SpikeMonitor object, NULL else
1961  assert(gGrpId >= 0 && gGrpId < getNumGroups());
1962 
1963  if (groupConfigMDMap[gGrpId].spikeMonitorId >= 0) {
1964  return spikeMonList[(groupConfigMDMap[gGrpId].spikeMonitorId)];
1965  } else {
1966  return NULL;
1967  }
1968 }
1969 
1971  assert(gGrpId >= 0 && gGrpId < getNumGroups());
1972 
1973  if (groupConfigMDMap[gGrpId].spikeMonitorId >= 0) {
1974  return spikeMonCoreList[(groupConfigMDMap[gGrpId].spikeMonitorId)];
1975  } else {
1976  return NULL;
1977  }
1978 }
1979 
1980 // returns pointer to existing NeuronMonitor object, NULL else
1982  assert(gGrpId >= 0 && gGrpId < getNumGroups());
1983 
1984  if (groupConfigMDMap[gGrpId].neuronMonitorId >= 0) {
1985  return neuronMonList[(groupConfigMDMap[gGrpId].neuronMonitorId)];
1986  }
1987  else {
1988  return NULL;
1989  }
1990 }
1991 
1993  assert(gGrpId >= 0 && gGrpId < getNumGroups());
1994 
1995  if (groupConfigMDMap[gGrpId].neuronMonitorId >= 0) {
1996  return neuronMonCoreList[(groupConfigMDMap[gGrpId].neuronMonitorId)];
1997  }
1998  else {
1999  return NULL;
2000  }
2001 }
2002 
2004  assert(connId>=0 && connId<numConnections);
2005 
2006  return RangeWeight(0.0f, connectConfigMap[connId].initWt, connectConfigMap[connId].maxWt);
2007 }
2008 
2009 
2013 
// all unsafe operations of SNN constructor
// Initializes logging streams, the results directory, RNG seed, simulation
// clocks, feature flags, default conductance time constants, and zeroes all
// runtime-data structures. Called once from the constructor; aborts the
// process on logger/directory failures.
void SNN::SNNinit() {
	// initialize snnState
	snnState = CONFIG_SNN;

	// set logger mode (defines where to print all status, error, and debug messages)
	switch (loggerMode_) {
	case USER:
		fpInf_ = stdout;
		fpErr_ = stderr;
		// debug output is discarded in USER mode ("nul" on Windows, /dev/null elsewhere)
		#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
		fpDeb_ = fopen("nul","w");
		#else
		fpDeb_ = fopen("/dev/null","w");
		#endif
		break;
	case DEVELOPER:
		// everything (incl. debug) goes to the console
		fpInf_ = stdout;
		fpErr_ = stderr;
		fpDeb_ = stdout;
		break;
	case SHOWTIME:
		// info and debug suppressed; only errors reach the console
		#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
		fpInf_ = fopen("nul","w");
		#else
		fpInf_ = fopen("/dev/null","w");
		#endif
		fpErr_ = stderr;
		#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
		fpDeb_ = fopen("nul","w");
		#else
		fpDeb_ = fopen("/dev/null","w");
		#endif
		break;
	case SILENT:
	case CUSTOM:
		// all console streams suppressed (CUSTOM redirects later via setLogsFp)
		#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
		fpInf_ = fopen("nul","w");
		fpErr_ = fopen("nul","w");
		fpDeb_ = fopen("nul","w");
		#else
		fpInf_ = fopen("/dev/null","w");
		fpErr_ = fopen("/dev/null","w");
		fpDeb_ = fopen("/dev/null","w");
		#endif
		break;
	default:
		fpErr_ = stderr; // need to open file stream first
		KERNEL_ERROR("Unknown logger mode");
		exit(UNKNOWN_LOGGER_ERROR);

	}

	// try to open log file in results folder: create if not exists
#if defined(WIN32) || defined(WIN64)
	// CreateDirectory is a no-op if the directory already exists
	CreateDirectory("results", NULL);
	fpLog_ = fopen("results/carlsim.log", "w");
#else
	struct stat sb;
	int createDir = 1;
	if (stat("results", &sb) == -1 || !S_ISDIR(sb.st_mode)) {
		// results dir does not exist, try to create:
		createDir = mkdir("results", 0777);
	}

	if (createDir == -1) {
		// tried to create dir, but failed
		fprintf(stderr, "Could not create directory \"results/\", which is required to "
			"store simulation results. Aborting simulation...\n");
		exit(NO_LOGGER_DIR_ERROR);
	} else {
		// open log file
		fpLog_ = fopen("results/carlsim.log", "w");

		if (createDir == 0) {
			// newly created dir: now that fpLog_/fpInf_ exist, inform user
			KERNEL_INFO("Created results directory \"results/\".");
		}
	}
#endif
	if (fpLog_ == NULL) {
		fprintf(stderr, "Could not create the directory \"results/\" or the log file \"results/carlsim.log\""
			", which is required to store simulation results. Aborting simulation...\n");
		exit(NO_LOGGER_DIR_ERROR);
	}

	KERNEL_INFO("*********************************************************************************");
	KERNEL_INFO("******************** Welcome to CARLsim %d.%d ***************************",
	// NOTE(review): the argument line supplying the version numbers for the
	// KERNEL_INFO above appears to be missing here (extraction artifact) —
	// verify against the upstream source file.
	KERNEL_INFO("*********************************************************************************\n");

	KERNEL_INFO("***************************** Configuring Network ********************************");
	KERNEL_INFO("Starting CARLsim simulation \"%s\" in %s mode",networkName_.c_str(),
		loggerMode_string[loggerMode_]);
	KERNEL_INFO("Random number seed: %d",randSeed_);

	time_t rawtime;
	struct tm * timeinfo;
	time(&rawtime);
	timeinfo = localtime(&rawtime);
	KERNEL_DEBUG("Current local time and date: %s", asctime(timeinfo));

	// init random seed
	srand48(randSeed_);

	// reset all simulation clocks (ms / s / total)
	simTimeRunStart = 0; simTimeRunStop = 0;
	simTimeLastRunSummary = 0;
	simTimeMs = 0; simTimeSec = 0; simTime = 0;

	// no groups/connections configured yet
	numGroups = 0;
	numConnections = 0;
	numCompartmentConnections = 0;
	numSpikeGenGrps = 0;
	simulatorDeleted = false;

	cumExecutionTime = 0.0;
	executionTime = 0.0;

	// no monitors registered yet
	spikeRateUpdated = false;
	numSpikeMonitor = 0;
	numNeuronMonitor = 0;
	numGroupMonitor = 0;
	numConnectionMonitor = 0;

	// feature flags; flipped later as the user configures the network
	sim_with_compartments = false;
	sim_with_fixedwts = true; // default is true, will be set to false if there are any plastic synapses
	sim_with_conductances = false; // default is false
	sim_with_stdp = false;
	sim_with_modulated_stdp = false;
	sim_with_homeostasis = false;
	sim_with_stp = false;
	sim_in_testing = false;

	loadSimFID = NULL;

	// conductance info struct for simulation
	sim_with_NMDA_rise = false;
	sim_with_GABAb_rise = false;
	dAMPA = 1.0-1.0/5.0; // some default decay and rise times
	rNMDA = 1.0-1.0/10.0;
	dNMDA = 1.0-1.0/150.0;
	sNMDA = 1.0;
	dGABAa = 1.0-1.0/6.0;
	rGABAb = 1.0-1.0/100.0;
	dGABAb = 1.0-1.0/150.0;
	sGABAb = 1.0;

	// default integration method: Forward-Euler with 0.5ms integration step
	// NOTE(review): the call that actually sets the integration method appears
	// to be missing here (extraction artifact) — verify against upstream.

	mulSynFast = NULL;
	mulSynSlow = NULL;

	// reset all monitors, don't deallocate (false)
	resetMonitors(false);

	resetGroupConfigs(false);

	resetConnectionConfigs(false);

	// initialize spike buffer
	spikeBuf = new SpikeBuffer(0, MAX_TIME_SLICE);

	memset(networkConfigs, 0, sizeof(NetworkConfigRT) * MAX_NET_PER_SNN);

	// reset all runtime data
	// GPU/CPU runtime data
	memset(runtimeData, 0, sizeof(RuntimeData) * MAX_NET_PER_SNN);
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) // FIXME: redundant??
		runtimeData[netId].allocated = false;

	// Manager runtime data
	memset(&managerRuntimeData, 0, sizeof(RuntimeData));
	managerRuntimeData.allocated = false; // FIXME: redundant??

	// default weight update parameter
	wtANDwtChangeUpdateInterval_ = 1000; // update weights every 1000 ms (default)
	wtANDwtChangeUpdateIntervalCnt_ = 0; // helper var to implement fast modulo
	stdpScaleFactor_ = 1.0f;
	wtChangeDecay_ = 0.0f;

	// FIXME: use it when necessary
#ifndef __NO_CUDA__
	CUDA_CREATE_TIMER(timer);
	CUDA_RESET_TIMER(timer);
#endif
}
2201 
// Advances the whole network by one simulation time step. The call order
// below is significant: STP/conductance decay, then spike generation, then
// firing detection, timing-table update, inter-partition spike routing,
// synaptic current update, neuron state integration, and finally clearing
// the external-firing bookkeeping for the next step.
void SNN::advSimStep() {
	// short-term plasticity update and conductance decay
	doSTPUpdateAndDecayCond();

	//KERNEL_INFO("STPUpdate!");

	// inject spikes from Poisson/user-defined spike generators
	spikeGeneratorUpdate();

	//KERNEL_INFO("spikeGeneratorUpdate!");

	// detect which neurons fired this step
	findFiring();

	//KERNEL_INFO("Find firing!");

	updateTimingTable();

	// deliver spikes across network partitions
	routeSpikes();

	// accumulate synaptic currents from arriving spikes
	doCurrentUpdate();

	//KERNEL_INFO("doCurrentUpdate!");

	// integrate neuron state variables (voltage, recovery, etc.)
	globalStateUpdate();

	//KERNEL_INFO("globalStateUpdate!");

	clearExtFiringTable();
}
2229 
// Runs the STP update and conductance decay on every active network partition.
// GPU partitions (netId < CPU_RUNTIME_BASE) are dispatched directly; CPU
// partitions run either inline (Windows/macOS) or on one pthread per
// partition, pinned round-robin onto the available CPU cores (Linux), and all
// threads are joined before returning.
void SNN::doSTPUpdateAndDecayCond() {
	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	pthread_t threads[numCores + 1]; // +1 keeps the array non-empty when numCores == 0 (works, though bad practice)
	cpu_set_t cpus;
	ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
	int threadCount = 0;
	#endif

	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			assert(runtimeData[netId].allocated);
			if (netId < CPU_RUNTIME_BASE) // GPU runtime
				doSTPUpdateAndDecayCond_GPU(netId);
			else{//CPU runtime
				#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
				doSTPUpdateAndDecayCond_CPU(netId);
				#else // Linux or MAC
				// pin each partition's worker thread to a core, round-robin
				pthread_attr_t attr;
				pthread_attr_init(&attr);
				CPU_ZERO(&cpus);
				CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
				pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

				// only netId is meaningful here; the helper ignores the rest
				argsThreadRoutine[threadCount].snn_pointer = this;
				argsThreadRoutine[threadCount].netId = netId;
				argsThreadRoutine[threadCount].lGrpId = 0;
				argsThreadRoutine[threadCount].startIdx = 0;
				argsThreadRoutine[threadCount].endIdx = 0;
				argsThreadRoutine[threadCount].GtoLOffset = 0;

				pthread_create(&threads[threadCount], &attr, &SNN::helperDoSTPUpdateAndDecayCond_CPU, (void*)&argsThreadRoutine[threadCount]);
				pthread_attr_destroy(&attr);
				threadCount++;
				#endif
			}
		}
	}

	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	// join all the threads
	for (int i=0; i<threadCount; i++){
		pthread_join(threads[i], NULL);
	}
	#endif
}
2275 
// Pushes generated spikes into the network for the current time step, in
// three phases: (1) if a Poisson rate changed since the last step, propagate
// the new rates to every partition; (2) poll user-defined spike generators;
// (3) run the per-partition spike-generator update. CPU partitions use the
// same pinned-pthread fan-out/join pattern as doSTPUpdateAndDecayCond().
void SNN::spikeGeneratorUpdate() {
	// If poisson rate has been updated, assign new poisson rate
	if (spikeRateUpdated) {
		#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
		pthread_t threads[numCores + 1]; // +1 keeps the array non-empty when numCores == 0 (works, though bad practice)
		cpu_set_t cpus;
		ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
		int threadCount = 0;
		#endif

		for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
			if (!groupPartitionLists[netId].empty()) {
				if (netId < CPU_RUNTIME_BASE) // GPU runtime
					assignPoissonFiringRate_GPU(netId);
				else{ // CPU runtime
					#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
					assignPoissonFiringRate_CPU(netId);
					#else // Linux or MAC
					// pin each partition's worker thread to a core, round-robin
					pthread_attr_t attr;
					pthread_attr_init(&attr);
					CPU_ZERO(&cpus);
					CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
					pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

					// only netId is meaningful here; the helper ignores the rest
					argsThreadRoutine[threadCount].snn_pointer = this;
					argsThreadRoutine[threadCount].netId = netId;
					argsThreadRoutine[threadCount].lGrpId = 0;
					argsThreadRoutine[threadCount].startIdx = 0;
					argsThreadRoutine[threadCount].endIdx = 0;
					argsThreadRoutine[threadCount].GtoLOffset = 0;

					pthread_create(&threads[threadCount], &attr, &SNN::helperAssignPoissonFiringRate_CPU, (void*)&argsThreadRoutine[threadCount]);
					pthread_attr_destroy(&attr);
					threadCount++;
					#endif
				}
			}
		}

		#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
		// join all the threads
		for (int i=0; i<threadCount; i++){
			pthread_join(threads[i], NULL);
		}
		#endif

		// rates are now consistent on all partitions
		spikeRateUpdated = false;
	}

	// If time slice has expired, check if new spikes needs to be generated by user-defined spike generators
	generateUserDefinedSpikes();

	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	pthread_t threads[numCores + 1]; // +1 keeps the array non-empty when numCores == 0 (works, though bad practice)
	cpu_set_t cpus;
	ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
	int threadCount = 0;
	#endif

	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			if (netId < CPU_RUNTIME_BASE) // GPU runtime
				spikeGeneratorUpdate_GPU(netId);
			else{ // CPU runtime
				#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
				spikeGeneratorUpdate_CPU(netId);
				#else // Linux or MAC
				// pin each partition's worker thread to a core, round-robin
				pthread_attr_t attr;
				pthread_attr_init(&attr);
				CPU_ZERO(&cpus);
				CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
				pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

				// only netId is meaningful here; the helper ignores the rest
				argsThreadRoutine[threadCount].snn_pointer = this;
				argsThreadRoutine[threadCount].netId = netId;
				argsThreadRoutine[threadCount].lGrpId = 0;
				argsThreadRoutine[threadCount].startIdx = 0;
				argsThreadRoutine[threadCount].endIdx = 0;
				argsThreadRoutine[threadCount].GtoLOffset = 0;

				pthread_create(&threads[threadCount], &attr, &SNN::helperSpikeGeneratorUpdate_CPU, (void*)&argsThreadRoutine[threadCount]);
				pthread_attr_destroy(&attr);
				threadCount++;
				#endif
			}
		}
	}

	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	// join all the threads
	for (int i=0; i<threadCount; i++){
		pthread_join(threads[i], NULL);
	}
	#endif

	// tell the spike buffer to advance to the next time step
	spikeBuf->step();
}
2374 
// Detects which neurons fired during the current time step on every active
// partition. GPU partitions are dispatched directly; CPU partitions use the
// same pinned-pthread fan-out/join pattern as doSTPUpdateAndDecayCond().
void SNN::findFiring() {
	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	pthread_t threads[numCores + 1]; // +1 keeps the array non-empty when numCores == 0 (works, though bad practice)
	cpu_set_t cpus;
	ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
	int threadCount = 0;
	#endif

	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			if (netId < CPU_RUNTIME_BASE) // GPU runtime
				findFiring_GPU(netId);
			else {// CPU runtime
				#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
				findFiring_CPU(netId);
				#else // Linux or MAC
				// pin each partition's worker thread to a core, round-robin
				pthread_attr_t attr;
				pthread_attr_init(&attr);
				CPU_ZERO(&cpus);
				CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
				pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

				// only netId is meaningful here; the helper ignores the rest
				argsThreadRoutine[threadCount].snn_pointer = this;
				argsThreadRoutine[threadCount].netId = netId;
				argsThreadRoutine[threadCount].lGrpId = 0;
				argsThreadRoutine[threadCount].startIdx = 0;
				argsThreadRoutine[threadCount].endIdx = 0;
				argsThreadRoutine[threadCount].GtoLOffset = 0;

				pthread_create(&threads[threadCount], &attr, &SNN::helperFindFiring_CPU, (void*)&argsThreadRoutine[threadCount]);
				pthread_attr_destroy(&attr);
				threadCount++;
				#endif
			}
		}
	}

	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	// join all the threads
	for (int i=0; i<threadCount; i++){
		pthread_join(threads[i], NULL);
	}
	#endif
}
2419 
// Deliver scheduled synaptic current updates for the current time step, in
// two passes (D2 first, then D1). Each pass dispatches per partition:
// GPU partitions launch kernels; CPU partitions run inline on Windows/macOS
// or on core-pinned pthreads on Linux. All D2 threads are joined before the
// D1 pass begins, so the two passes never overlap.
void SNN::doCurrentUpdate() {
	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	pthread_t threads[numCores + 1]; // 1 additional array size if numCores == 0, it may work though bad practice
	cpu_set_t cpus;
	ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
	int threadCount = 0; // worker count, reused for both passes
	#endif

	// pass 1: D2 updates on every active partition
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			if (netId < CPU_RUNTIME_BASE) // GPU runtime
				doCurrentUpdateD2_GPU(netId);
			else{ // CPU runtime
				#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
				doCurrentUpdateD2_CPU(netId);
				#else // Linux or MAC
				// pin the worker thread to a core, round-robin over NUM_CPU_CORES
				pthread_attr_t attr;
				pthread_attr_init(&attr);
				CPU_ZERO(&cpus);
				CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
				pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

				// only snn_pointer and netId vary; remaining fields are zeroed
				argsThreadRoutine[threadCount].snn_pointer = this;
				argsThreadRoutine[threadCount].netId = netId;
				argsThreadRoutine[threadCount].lGrpId = 0;
				argsThreadRoutine[threadCount].startIdx = 0;
				argsThreadRoutine[threadCount].endIdx = 0;
				argsThreadRoutine[threadCount].GtoLOffset = 0;

				pthread_create(&threads[threadCount], &attr, &SNN::helperDoCurrentUpdateD2_CPU, (void*)&argsThreadRoutine[threadCount]);
				pthread_attr_destroy(&attr); // pthread_create copies attr; safe to destroy immediately
				threadCount++;
				#endif
			}
		}
	}

	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	// join all the threads
	for (int i=0; i<threadCount; i++){
		pthread_join(threads[i], NULL);
	}
	threadCount = 0; // reset so the D1 pass reuses the same thread/arg arrays
	#endif

	// pass 2: D1 updates on every active partition
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			if (netId < CPU_RUNTIME_BASE) // GPU runtime
				doCurrentUpdateD1_GPU(netId);
			else{ // CPU runtime
				#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
				doCurrentUpdateD1_CPU(netId);
				#else // Linux or MAC
				pthread_attr_t attr;
				pthread_attr_init(&attr);
				CPU_ZERO(&cpus);
				CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
				pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

				argsThreadRoutine[threadCount].snn_pointer = this;
				argsThreadRoutine[threadCount].netId = netId;
				argsThreadRoutine[threadCount].lGrpId = 0;
				argsThreadRoutine[threadCount].startIdx = 0;
				argsThreadRoutine[threadCount].endIdx = 0;
				argsThreadRoutine[threadCount].GtoLOffset = 0;

				pthread_create(&threads[threadCount], &attr, &SNN::helperDoCurrentUpdateD1_CPU, (void*)&argsThreadRoutine[threadCount]);
				pthread_attr_destroy(&attr);
				threadCount++;
				#endif
			}
		}
	}

	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	// join all the threads
	for (int i=0; i<threadCount; i++){
		pthread_join(threads[i], NULL);
	}
	#endif
}
2501 
// Advance the per-partition spike timing tables for the current time step.
// GPU partitions call updateTimingTable_GPU; CPU partitions run
// updateTimingTable_CPU, inline on Windows/macOS or on core-pinned pthreads
// on Linux, joined before returning.
void SNN::updateTimingTable() {
	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	pthread_t threads[numCores + 1]; // 1 additional array size if numCores == 0, it may work though bad practice
	cpu_set_t cpus;
	ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
	int threadCount = 0; // number of worker threads actually spawned
	#endif

	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) { // partition is in use
			if (netId < CPU_RUNTIME_BASE) // GPU runtime
				updateTimingTable_GPU(netId);
			else{ // CPU runtime
				#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
				updateTimingTable_CPU(netId);
				#else // Linux or MAC
				// pin the worker thread to a core, round-robin over NUM_CPU_CORES
				pthread_attr_t attr;
				pthread_attr_init(&attr);
				CPU_ZERO(&cpus);
				CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
				pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

				// only snn_pointer and netId vary; remaining fields are zeroed
				argsThreadRoutine[threadCount].snn_pointer = this;
				argsThreadRoutine[threadCount].netId = netId;
				argsThreadRoutine[threadCount].lGrpId = 0;
				argsThreadRoutine[threadCount].startIdx = 0;
				argsThreadRoutine[threadCount].endIdx = 0;
				argsThreadRoutine[threadCount].GtoLOffset = 0;

				pthread_create(&threads[threadCount], &attr, &SNN::helperUpdateTimingTable_CPU, (void*)&argsThreadRoutine[threadCount]);
				pthread_attr_destroy(&attr); // pthread_create copies attr; safe to destroy immediately
				threadCount++;
				#endif
			}
		}
	}
	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	// join all the threads
	for (int i=0; i<threadCount; i++){
		pthread_join(threads[i], NULL);
	}
	#endif
}
2545 
// Integrate the global neuronal/synaptic state for the current time step.
// The first pass runs globalStateUpdate_C_GPU per GPU partition and
// globalStateUpdate_CPU per CPU partition (inline on Windows/macOS,
// core-pinned pthreads on Linux); all CPU threads are joined before the two
// follow-up GPU-only passes (_N_GPU, then _G_GPU) run. The suffixes C/N/G
// presumably denote successive kernel stages of the update — confirm against
// the GPU runtime sources.
void SNN::globalStateUpdate() {
	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	pthread_t threads[numCores + 1]; // 1 additional array size if numCores == 0, it may work though bad practice
	cpu_set_t cpus;
	ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
	int threadCount = 0; // number of worker threads actually spawned
	#endif

	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) { // partition is in use
			if (netId < CPU_RUNTIME_BASE) // GPU runtime
				globalStateUpdate_C_GPU(netId);
			else{ // CPU runtime
				#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
				globalStateUpdate_CPU(netId);
				#else // Linux or MAC
				// pin the worker thread to a core, round-robin over NUM_CPU_CORES
				pthread_attr_t attr;
				pthread_attr_init(&attr);
				CPU_ZERO(&cpus);
				CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
				pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

				// only snn_pointer and netId vary; remaining fields are zeroed
				argsThreadRoutine[threadCount].snn_pointer = this;
				argsThreadRoutine[threadCount].netId = netId;
				argsThreadRoutine[threadCount].lGrpId = 0;
				argsThreadRoutine[threadCount].startIdx = 0;
				argsThreadRoutine[threadCount].endIdx = 0;
				argsThreadRoutine[threadCount].GtoLOffset = 0;

				pthread_create(&threads[threadCount], &attr, &SNN::helperGlobalStateUpdate_CPU, (void*)&argsThreadRoutine[threadCount]);
				pthread_attr_destroy(&attr); // pthread_create copies attr; safe to destroy immediately
				threadCount++;
				#endif
			}
		}
	}

	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	// join all the threads
	for (int i=0; i<threadCount; i++){
		pthread_join(threads[i], NULL);
	}
	#endif

	// second pass: GPU partitions only
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			if (netId < CPU_RUNTIME_BASE) // GPU runtime
				globalStateUpdate_N_GPU(netId);
		}
	}

	// third pass: GPU partitions only
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			if (netId < CPU_RUNTIME_BASE) // GPU runtime
				globalStateUpdate_G_GPU(netId);
		}
	}
}
2604 
2605 void SNN::clearExtFiringTable() {
2606  #if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
2607  pthread_t threads[numCores + 1]; // 1 additional array size if numCores == 0, it may work though bad practice
2608  cpu_set_t cpus;
2609  ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
2610  int threadCount = 0;
2611  #endif
2612 
2613  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
2614  if (!groupPartitionLists[netId].empty()) {
2615  if (netId < CPU_RUNTIME_BASE) // GPU runtime
2616  clearExtFiringTable_GPU(netId);
2617  else{ // CPU runtime
2618  #if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
2619  clearExtFiringTable_CPU(netId);
2620  #else // Linux or MAC
2621  pthread_attr_t attr;
2622  pthread_attr_init(&attr);
2623  CPU_ZERO(&cpus);
2624  CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
2625  pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);
2626 
2627  argsThreadRoutine[threadCount].snn_pointer = this;
2628  argsThreadRoutine[threadCount].netId = netId;
2629  argsThreadRoutine[threadCount].lGrpId = 0;
2630  argsThreadRoutine[threadCount].startIdx = 0;
2631  argsThreadRoutine[threadCount].endIdx = 0;
2632  argsThreadRoutine[threadCount].GtoLOffset = 0;
2633 
2634  pthread_create(&threads[threadCount], &attr, &SNN::helperClearExtFiringTable_CPU, (void*)&argsThreadRoutine[threadCount]);
2635  pthread_attr_destroy(&attr);
2636  threadCount++;
2637  #endif
2638  }
2639  }
2640  }
2641 
2642  #if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
2643  // join all the threads
2644  for (int i=0; i<threadCount; i++){
2645  pthread_join(threads[i], NULL);
2646  }
2647  #endif
2648 }
2649 
// Apply accumulated weight changes (plasticity) on every active partition.
// GPU partitions call updateWeights_GPU; CPU partitions run
// updateWeights_CPU, inline on Windows/macOS or on core-pinned pthreads on
// Linux, joined before returning.
void SNN::updateWeights() {
	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	pthread_t threads[numCores + 1]; // 1 additional array size if numCores == 0, it may work though bad practice
	cpu_set_t cpus;
	ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
	int threadCount = 0; // number of worker threads actually spawned
	#endif

	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) { // partition is in use
			if (netId < CPU_RUNTIME_BASE) // GPU runtime
				updateWeights_GPU(netId);
			else{ // CPU runtime
				#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
				updateWeights_CPU(netId);
				#else // Linux or MAC
				// pin the worker thread to a core, round-robin over NUM_CPU_CORES
				pthread_attr_t attr;
				pthread_attr_init(&attr);
				CPU_ZERO(&cpus);
				CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
				pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

				// only snn_pointer and netId vary; remaining fields are zeroed
				argsThreadRoutine[threadCount].snn_pointer = this;
				argsThreadRoutine[threadCount].netId = netId;
				argsThreadRoutine[threadCount].lGrpId = 0;
				argsThreadRoutine[threadCount].startIdx = 0;
				argsThreadRoutine[threadCount].endIdx = 0;
				argsThreadRoutine[threadCount].GtoLOffset = 0;

				pthread_create(&threads[threadCount], &attr, &SNN::helperUpdateWeights_CPU, (void*)&argsThreadRoutine[threadCount]);
				pthread_attr_destroy(&attr); // pthread_create copies attr; safe to destroy immediately
				threadCount++;
				#endif
			}
		}
	}
	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	// join all the threads
	for (int i=0; i<threadCount; i++){
		pthread_join(threads[i], NULL);
	}
	#endif

}
2694 
2695 void SNN::updateNetworkConfig(int netId) {
2696  assert(netId < MAX_NET_PER_SNN);
2697 
2698  if (netId < CPU_RUNTIME_BASE) // GPU runtime
2699  copyNetworkConfig(netId, cudaMemcpyHostToDevice);
2700  else
2701  copyNetworkConfig(netId); // CPU runtime
2702 }
2703 
// Shift the per-partition spike tables at the end of a simulation interval.
// The first pass runs shiftSpikeTables_F_GPU per GPU partition and
// shiftSpikeTables_CPU per CPU partition (inline on Windows/macOS,
// core-pinned pthreads on Linux); after all CPU threads are joined, a
// second, GPU-only pass runs shiftSpikeTables_T_GPU.
void SNN::shiftSpikeTables() {
	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	pthread_t threads[numCores + 1]; // 1 additional array size if numCores == 0, it may work though bad practice
	cpu_set_t cpus;
	ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
	int threadCount = 0; // number of worker threads actually spawned
	#endif

	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) { // partition is in use
			if (netId < CPU_RUNTIME_BASE) // GPU runtime
				shiftSpikeTables_F_GPU(netId);
			else { // CPU runtime
				#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
				shiftSpikeTables_CPU(netId);
				#else // Linux or MAC
				// pin the worker thread to a core, round-robin over NUM_CPU_CORES
				pthread_attr_t attr;
				pthread_attr_init(&attr);
				CPU_ZERO(&cpus);
				CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
				pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

				// only snn_pointer and netId vary; remaining fields are zeroed
				argsThreadRoutine[threadCount].snn_pointer = this;
				argsThreadRoutine[threadCount].netId = netId;
				argsThreadRoutine[threadCount].lGrpId = 0;
				argsThreadRoutine[threadCount].startIdx = 0;
				argsThreadRoutine[threadCount].endIdx = 0;
				argsThreadRoutine[threadCount].GtoLOffset = 0;

				pthread_create(&threads[threadCount], &attr, &SNN::helperShiftSpikeTables_CPU, (void*)&argsThreadRoutine[threadCount]);
				pthread_attr_destroy(&attr); // pthread_create copies attr; safe to destroy immediately
				threadCount++;
				#endif
			}
		}
	}

	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	// join all the threads
	for (int i=0; i<threadCount; i++){
		pthread_join(threads[i], NULL);
	}
	#endif

	// second pass: GPU partitions only
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			if (netId < CPU_RUNTIME_BASE) // GPU runtime
				shiftSpikeTables_T_GPU(netId);
		}
	}
}
2755 
2756 void SNN::allocateSNN(int netId) {
2757  assert(netId > ANY && netId < MAX_NET_PER_SNN);
2758 
2759  if (netId < CPU_RUNTIME_BASE)
2760  allocateSNN_GPU(netId);
2761  else
2762  allocateSNN_CPU(netId);
2763 }
2764 
2765 void SNN::allocateManagerRuntimeData() {
2766  // reset variable related to spike count
2767  managerRuntimeData.spikeCountSec = 0;
2768  managerRuntimeData.spikeCountD1Sec = 0;
2769  managerRuntimeData.spikeCountD2Sec = 0;
2770  managerRuntimeData.spikeCountLastSecLeftD2 = 0;
2771  managerRuntimeData.spikeCount = 0;
2772  managerRuntimeData.spikeCountD1 = 0;
2773  managerRuntimeData.spikeCountD2 = 0;
2774  managerRuntimeData.nPoissonSpikes = 0;
2775  managerRuntimeData.spikeCountExtRxD1 = 0;
2776  managerRuntimeData.spikeCountExtRxD2 = 0;
2777 
2778  managerRuntimeData.voltage = new float[managerRTDSize.maxNumNReg];
2779  managerRuntimeData.nextVoltage = new float[managerRTDSize.maxNumNReg];
2780  managerRuntimeData.recovery = new float[managerRTDSize.maxNumNReg];
2781  managerRuntimeData.Izh_a = new float[managerRTDSize.maxNumNReg];
2782  managerRuntimeData.Izh_b = new float[managerRTDSize.maxNumNReg];
2783  managerRuntimeData.Izh_c = new float[managerRTDSize.maxNumNReg];
2784  managerRuntimeData.Izh_d = new float[managerRTDSize.maxNumNReg];
2785  managerRuntimeData.Izh_C = new float[managerRTDSize.maxNumNReg];
2786  managerRuntimeData.Izh_k = new float[managerRTDSize.maxNumNReg];
2787  managerRuntimeData.Izh_vr = new float[managerRTDSize.maxNumNReg];
2788  managerRuntimeData.Izh_vt = new float[managerRTDSize.maxNumNReg];
2789  managerRuntimeData.Izh_vpeak = new float[managerRTDSize.maxNumNReg];
2790  managerRuntimeData.lif_tau_m = new int[managerRTDSize.maxNumNReg];
2791  managerRuntimeData.lif_tau_ref = new int[managerRTDSize.maxNumNReg];
2792  managerRuntimeData.lif_tau_ref_c = new int[managerRTDSize.maxNumNReg];
2793  managerRuntimeData.lif_vTh = new float[managerRTDSize.maxNumNReg];
2794  managerRuntimeData.lif_vReset = new float[managerRTDSize.maxNumNReg];
2795  managerRuntimeData.lif_gain = new float[managerRTDSize.maxNumNReg];
2796  managerRuntimeData.lif_bias = new float[managerRTDSize.maxNumNReg];
2797  managerRuntimeData.current = new float[managerRTDSize.maxNumNReg];
2798  managerRuntimeData.extCurrent = new float[managerRTDSize.maxNumNReg];
2799  managerRuntimeData.totalCurrent = new float[managerRTDSize.maxNumNReg];
2800  managerRuntimeData.curSpike = new bool[managerRTDSize.maxNumNReg];
2801  memset(managerRuntimeData.voltage, 0, sizeof(float) * managerRTDSize.maxNumNReg);
2802  memset(managerRuntimeData.nextVoltage, 0, sizeof(float) * managerRTDSize.maxNumNReg);
2803  memset(managerRuntimeData.recovery, 0, sizeof(float) * managerRTDSize.maxNumNReg);
2804  memset(managerRuntimeData.Izh_a, 0, sizeof(float) * managerRTDSize.maxNumNReg);
2805  memset(managerRuntimeData.Izh_b, 0, sizeof(float) * managerRTDSize.maxNumNReg);
2806  memset(managerRuntimeData.Izh_c, 0, sizeof(float) * managerRTDSize.maxNumNReg);
2807  memset(managerRuntimeData.Izh_d, 0, sizeof(float) * managerRTDSize.maxNumNReg);
2808  memset(managerRuntimeData.Izh_C, 0, sizeof(float) * managerRTDSize.maxNumNReg);
2809  memset(managerRuntimeData.Izh_k, 0, sizeof(float) * managerRTDSize.maxNumNReg);
2810  memset(managerRuntimeData.Izh_vr, 0, sizeof(float) * managerRTDSize.maxNumNReg);
2811  memset(managerRuntimeData.Izh_vt, 0, sizeof(float) * managerRTDSize.maxNumNReg);
2812  memset(managerRuntimeData.Izh_vpeak, 0, sizeof(float) * managerRTDSize.maxNumNReg);
2813  memset(managerRuntimeData.lif_tau_m, 0, sizeof(int) * managerRTDSize.maxNumNReg);
2814  memset(managerRuntimeData.lif_tau_ref, 0, sizeof(int) * managerRTDSize.maxNumNReg);
2815  memset(managerRuntimeData.lif_tau_ref_c, 0, sizeof(int) * managerRTDSize.maxNumNReg);
2816  memset(managerRuntimeData.lif_vTh, 0, sizeof(float) * managerRTDSize.maxNumNReg);
2817  memset(managerRuntimeData.lif_vReset, 0, sizeof(float) * managerRTDSize.maxNumNReg);
2818  memset(managerRuntimeData.lif_gain, 0, sizeof(float) * managerRTDSize.maxNumNReg);
2819  memset(managerRuntimeData.lif_bias, 0, sizeof(float) * managerRTDSize.maxNumNReg);
2820  memset(managerRuntimeData.current, 0, sizeof(float) * managerRTDSize.maxNumNReg);
2821  memset(managerRuntimeData.extCurrent, 0, sizeof(float) * managerRTDSize.maxNumNReg);
2822  memset(managerRuntimeData.totalCurrent, 0, sizeof(float) * managerRTDSize.maxNumNReg);
2823  memset(managerRuntimeData.curSpike, 0, sizeof(bool) * managerRTDSize.maxNumNReg);
2824 
2825  managerRuntimeData.nVBuffer = new float[MAX_NEURON_MON_GRP_SZIE * 1000 * managerRTDSize.maxNumGroups]; // 1 second v buffer
2826  managerRuntimeData.nUBuffer = new float[MAX_NEURON_MON_GRP_SZIE * 1000 * managerRTDSize.maxNumGroups];
2827  managerRuntimeData.nIBuffer = new float[MAX_NEURON_MON_GRP_SZIE * 1000 * managerRTDSize.maxNumGroups];
2828  memset(managerRuntimeData.nVBuffer, 0, sizeof(float) * MAX_NEURON_MON_GRP_SZIE * 1000 * managerRTDSize.maxNumGroups);
2829  memset(managerRuntimeData.nUBuffer, 0, sizeof(float) * MAX_NEURON_MON_GRP_SZIE * 1000 * managerRTDSize.maxNumGroups);
2830  memset(managerRuntimeData.nIBuffer, 0, sizeof(float) * MAX_NEURON_MON_GRP_SZIE * 1000 * managerRTDSize.maxNumGroups);
2831 
2832  managerRuntimeData.gAMPA = new float[managerRTDSize.glbNumNReg]; // sufficient to hold all regular neurons in the global network
2833  managerRuntimeData.gNMDA_r = new float[managerRTDSize.glbNumNReg]; // sufficient to hold all regular neurons in the global network
2834  managerRuntimeData.gNMDA_d = new float[managerRTDSize.glbNumNReg]; // sufficient to hold all regular neurons in the global network
2835  managerRuntimeData.gNMDA = new float[managerRTDSize.glbNumNReg]; // sufficient to hold all regular neurons in the global network
2836  memset(managerRuntimeData.gAMPA, 0, sizeof(float) * managerRTDSize.glbNumNReg);
2837  memset(managerRuntimeData.gNMDA_r, 0, sizeof(float) * managerRTDSize.glbNumNReg);
2838  memset(managerRuntimeData.gNMDA_d, 0, sizeof(float) * managerRTDSize.glbNumNReg);
2839  memset(managerRuntimeData.gNMDA, 0, sizeof(float) * managerRTDSize.glbNumNReg);
2840 
2841  managerRuntimeData.gGABAa = new float[managerRTDSize.glbNumNReg]; // sufficient to hold all regular neurons in the global network
2842  managerRuntimeData.gGABAb_r = new float[managerRTDSize.glbNumNReg]; // sufficient to hold all regular neurons in the global network
2843  managerRuntimeData.gGABAb_d = new float[managerRTDSize.glbNumNReg]; // sufficient to hold all regular neurons in the global network
2844  managerRuntimeData.gGABAb = new float[managerRTDSize.glbNumNReg]; // sufficient to hold all regular neurons in the global network
2845  memset(managerRuntimeData.gGABAa, 0, sizeof(float) * managerRTDSize.glbNumNReg);
2846  memset(managerRuntimeData.gGABAb_r, 0, sizeof(float) * managerRTDSize.glbNumNReg);
2847  memset(managerRuntimeData.gGABAb_d, 0, sizeof(float) * managerRTDSize.glbNumNReg);
2848  memset(managerRuntimeData.gGABAb, 0, sizeof(float) * managerRTDSize.glbNumNReg);
2849 
2850  // allocate neuromodulators and their assistive buffers
2851  managerRuntimeData.grpDA = new float[managerRTDSize.maxNumGroups];
2852  managerRuntimeData.grp5HT = new float[managerRTDSize.maxNumGroups];
2853  managerRuntimeData.grpACh = new float[managerRTDSize.maxNumGroups];
2854  managerRuntimeData.grpNE = new float[managerRTDSize.maxNumGroups];
2855  memset(managerRuntimeData.grpDA, 0, sizeof(float) * managerRTDSize.maxNumGroups);
2856  memset(managerRuntimeData.grp5HT, 0, sizeof(float) * managerRTDSize.maxNumGroups);
2857  memset(managerRuntimeData.grpACh, 0, sizeof(float) * managerRTDSize.maxNumGroups);
2858  memset(managerRuntimeData.grpNE, 0, sizeof(float) * managerRTDSize.maxNumGroups);
2859 
2860 
2861  managerRuntimeData.grpDABuffer = new float[managerRTDSize.maxNumGroups * 1000]; // 1 second DA buffer
2862  managerRuntimeData.grp5HTBuffer = new float[managerRTDSize.maxNumGroups * 1000];
2863  managerRuntimeData.grpAChBuffer = new float[managerRTDSize.maxNumGroups * 1000];
2864  managerRuntimeData.grpNEBuffer = new float[managerRTDSize.maxNumGroups * 1000];
2865  memset(managerRuntimeData.grpDABuffer, 0, managerRTDSize.maxNumGroups * sizeof(float) * 1000);
2866  memset(managerRuntimeData.grp5HTBuffer, 0, managerRTDSize.maxNumGroups * sizeof(float) * 1000);
2867  memset(managerRuntimeData.grpAChBuffer, 0, managerRTDSize.maxNumGroups * sizeof(float) * 1000);
2868  memset(managerRuntimeData.grpNEBuffer, 0, managerRTDSize.maxNumGroups * sizeof(float) * 1000);
2869 
2870  managerRuntimeData.lastSpikeTime = new int[managerRTDSize.maxNumNAssigned];
2871  memset(managerRuntimeData.lastSpikeTime, 0, sizeof(int) * managerRTDSize.maxNumNAssigned);
2872 
2873  managerRuntimeData.nSpikeCnt = new int[managerRTDSize.glbNumN];
2874  memset(managerRuntimeData.nSpikeCnt, 0, sizeof(int) * managerRTDSize.glbNumN); // sufficient to hold all neurons in the global network
2875 
2877  managerRuntimeData.avgFiring = new float[managerRTDSize.maxNumN];
2878  managerRuntimeData.baseFiring = new float[managerRTDSize.maxNumN];
2879  memset(managerRuntimeData.avgFiring, 0, sizeof(float) * managerRTDSize.maxNumN);
2880  memset(managerRuntimeData.baseFiring, 0, sizeof(float) * managerRTDSize.maxNumN);
2881 
2882  // STP can be applied to spike generators, too -> numN
2883  // \TODO: The size of these data structures could be reduced to the max synaptic delay of all
2884  // connections with STP. That number might not be the same as maxDelay_.
2885  managerRuntimeData.stpu = new float[managerRTDSize.maxNumN * (glbNetworkConfig.maxDelay + 1)];
2886  managerRuntimeData.stpx = new float[managerRTDSize.maxNumN * (glbNetworkConfig.maxDelay + 1)];
2887  memset(managerRuntimeData.stpu, 0, sizeof(float) * managerRTDSize.maxNumN * (glbNetworkConfig.maxDelay + 1));
2888  memset(managerRuntimeData.stpx, 0, sizeof(float) * managerRTDSize.maxNumN * (glbNetworkConfig.maxDelay + 1));
2889 
2890  managerRuntimeData.Npre = new unsigned short[managerRTDSize.maxNumNAssigned];
2891  managerRuntimeData.Npre_plastic = new unsigned short[managerRTDSize.maxNumNAssigned];
2892  managerRuntimeData.Npost = new unsigned short[managerRTDSize.maxNumNAssigned];
2893  managerRuntimeData.cumulativePost = new unsigned int[managerRTDSize.maxNumNAssigned];
2894  managerRuntimeData.cumulativePre = new unsigned int[managerRTDSize.maxNumNAssigned];
2895  memset(managerRuntimeData.Npre, 0, sizeof(short) * managerRTDSize.maxNumNAssigned);
2896  memset(managerRuntimeData.Npre_plastic, 0, sizeof(short) * managerRTDSize.maxNumNAssigned);
2897  memset(managerRuntimeData.Npost, 0, sizeof(short) * managerRTDSize.maxNumNAssigned);
2898  memset(managerRuntimeData.cumulativePost, 0, sizeof(int) * managerRTDSize.maxNumNAssigned);
2899  memset(managerRuntimeData.cumulativePre, 0, sizeof(int) * managerRTDSize.maxNumNAssigned);
2900 
2901  managerRuntimeData.postSynapticIds = new SynInfo[managerRTDSize.maxNumPostSynNet];
2902  managerRuntimeData.postDelayInfo = new DelayInfo[managerRTDSize.maxNumNAssigned * (glbNetworkConfig.maxDelay + 1)];
2903  memset(managerRuntimeData.postSynapticIds, 0, sizeof(SynInfo) * managerRTDSize.maxNumPostSynNet);
2904  memset(managerRuntimeData.postDelayInfo, 0, sizeof(DelayInfo) * managerRTDSize.maxNumNAssigned * (glbNetworkConfig.maxDelay + 1));
2905 
2906  managerRuntimeData.preSynapticIds = new SynInfo[managerRTDSize.maxNumPreSynNet];
2907  memset(managerRuntimeData.preSynapticIds, 0, sizeof(SynInfo) * managerRTDSize.maxNumPreSynNet);
2908 
2909  managerRuntimeData.wt = new float[managerRTDSize.maxNumPreSynNet];
2910  managerRuntimeData.wtChange = new float[managerRTDSize.maxNumPreSynNet];
2911  managerRuntimeData.maxSynWt = new float[managerRTDSize.maxNumPreSynNet];
2912  managerRuntimeData.synSpikeTime = new int[managerRTDSize.maxNumPreSynNet];
2913  memset(managerRuntimeData.wt, 0, sizeof(float) * managerRTDSize.maxNumPreSynNet);
2914  memset(managerRuntimeData.wtChange, 0, sizeof(float) * managerRTDSize.maxNumPreSynNet);
2915  memset(managerRuntimeData.maxSynWt, 0, sizeof(float) * managerRTDSize.maxNumPreSynNet);
2916  memset(managerRuntimeData.synSpikeTime, 0, sizeof(int) * managerRTDSize.maxNumPreSynNet);
2917 
2918  mulSynFast = new float[managerRTDSize.maxNumConnections];
2919  mulSynSlow = new float[managerRTDSize.maxNumConnections];
2920  memset(mulSynFast, 0, sizeof(float) * managerRTDSize.maxNumConnections);
2921  memset(mulSynSlow, 0, sizeof(float) * managerRTDSize.maxNumConnections);
2922 
2923  managerRuntimeData.connIdsPreIdx = new short int[managerRTDSize.maxNumPreSynNet];
2924  memset(managerRuntimeData.connIdsPreIdx, 0, sizeof(short int) * managerRTDSize.maxNumPreSynNet);
2925 
2926  managerRuntimeData.grpIds = new short int[managerRTDSize.maxNumNAssigned];
2927  memset(managerRuntimeData.grpIds, 0, sizeof(short int) * managerRTDSize.maxNumNAssigned);
2928 
2929  managerRuntimeData.spikeGenBits = new unsigned int[managerRTDSize.maxNumNSpikeGen / 32 + 1];
2930 
2931  // Confirm allocation of SNN runtime data in main memory
2932  managerRuntimeData.allocated = true;
2933  managerRuntimeData.memType = CPU_MEM;
2934 }
2935 
2936 int SNN::assignGroup(int gGrpId, int availableNeuronId) {
2937  int newAvailableNeuronId;
2938  assert(groupConfigMDMap[gGrpId].gStartN == -1); // The group has not yet been assigned
2939  groupConfigMDMap[gGrpId].gStartN = availableNeuronId;
2940  groupConfigMDMap[gGrpId].gEndN = availableNeuronId + groupConfigMap[gGrpId].numN - 1;
2941 
2942  KERNEL_DEBUG("Allocation for %d(%s), St=%d, End=%d",
2943  gGrpId, groupConfigMap[gGrpId].grpName.c_str(), groupConfigMDMap[gGrpId].gStartN, groupConfigMDMap[gGrpId].gEndN);
2944 
2945  newAvailableNeuronId = availableNeuronId + groupConfigMap[gGrpId].numN;
2946  //assert(newAvailableNeuronId <= numN);
2947 
2948  return newAvailableNeuronId;
2949 }
2950 
2951 int SNN::assignGroup(std::list<GroupConfigMD>::iterator grpIt, int localGroupId, int availableNeuronId) {
2952  int newAvailableNeuronId;
2953  assert(grpIt->lGrpId == -1); // The group has not yet been assigned
2954  grpIt->lGrpId = localGroupId;
2955  grpIt->lStartN = availableNeuronId;
2956  grpIt->lEndN = availableNeuronId + groupConfigMap[grpIt->gGrpId].numN - 1;
2957 
2958  grpIt->LtoGOffset = grpIt->gStartN - grpIt->lStartN;
2959  grpIt->GtoLOffset = grpIt->lStartN - grpIt->gStartN;
2960 
2961  KERNEL_DEBUG("Allocation for group (%s) [id:%d, local id:%d], St=%d, End=%d", groupConfigMap[grpIt->gGrpId].grpName.c_str(),
2962  grpIt->gGrpId, grpIt->lGrpId, grpIt->lStartN, grpIt->lEndN);
2963 
2964  newAvailableNeuronId = availableNeuronId + groupConfigMap[grpIt->gGrpId].numN;
2965 
2966  return newAvailableNeuronId;
2967 }
2968 
2969 void SNN::generateGroupRuntime(int netId, int lGrpId) {
2970  resetNeuromodulator(netId, lGrpId);
2971 
2972  for(int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++)
2973  resetNeuron(netId, lGrpId, lNId);
2974 }
2975 
2976 void SNN::generateRuntimeGroupConfigs() {
2977  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
2978  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
2979  // publish the group configs in an array for quick access and accessible on GPUs (cuda doesn't support std::list)
2980  int gGrpId = grpIt->gGrpId;
2981  int lGrpId = grpIt->lGrpId;
2982 
2983  // Data published by groupConfigMDMap[] are generated in compileSNN() and are invariant in partitionSNN()
2984  // Data published by grpIt are generated in partitionSNN() and maybe have duplicated copys
2985  groupConfigs[netId][lGrpId].netId = grpIt->netId;
2986  groupConfigs[netId][lGrpId].gGrpId = grpIt->gGrpId;
2987  groupConfigs[netId][lGrpId].gStartN = grpIt->gStartN;
2988  groupConfigs[netId][lGrpId].gEndN = grpIt->gEndN;
2989  groupConfigs[netId][lGrpId].lGrpId = grpIt->lGrpId;
2990  groupConfigs[netId][lGrpId].lStartN = grpIt->lStartN;
2991  groupConfigs[netId][lGrpId].lEndN = grpIt->lEndN;
2992  groupConfigs[netId][lGrpId].LtoGOffset = grpIt->LtoGOffset;
2993  groupConfigs[netId][lGrpId].GtoLOffset = grpIt->GtoLOffset;
2994  groupConfigs[netId][lGrpId].Type = groupConfigMap[gGrpId].type;
2995  groupConfigs[netId][lGrpId].numN = groupConfigMap[gGrpId].numN;
2996  groupConfigs[netId][lGrpId].numPostSynapses = grpIt->numPostSynapses;
2997  groupConfigs[netId][lGrpId].numPreSynapses = grpIt->numPreSynapses;
2998  groupConfigs[netId][lGrpId].isSpikeGenerator = groupConfigMap[gGrpId].isSpikeGenerator;
2999  groupConfigs[netId][lGrpId].isSpikeGenFunc = groupConfigMap[gGrpId].spikeGenFunc != NULL ? true : false;
3000  groupConfigs[netId][lGrpId].WithSTP = groupConfigMap[gGrpId].stpConfig.WithSTP;
3001  groupConfigs[netId][lGrpId].WithSTDP = groupConfigMap[gGrpId].stdpConfig.WithSTDP;
3002  groupConfigs[netId][lGrpId].WithESTDP = groupConfigMap[gGrpId].stdpConfig.WithESTDP;
3003  groupConfigs[netId][lGrpId].WithISTDP = groupConfigMap[gGrpId].stdpConfig.WithISTDP;
3004  groupConfigs[netId][lGrpId].WithESTDPtype = groupConfigMap[gGrpId].stdpConfig.WithESTDPtype;
3005  groupConfigs[netId][lGrpId].WithISTDPtype = groupConfigMap[gGrpId].stdpConfig.WithISTDPtype;
3006  groupConfigs[netId][lGrpId].WithESTDPcurve = groupConfigMap[gGrpId].stdpConfig.WithESTDPcurve;
3007  groupConfigs[netId][lGrpId].WithISTDPcurve = groupConfigMap[gGrpId].stdpConfig.WithISTDPcurve;
3008  groupConfigs[netId][lGrpId].WithHomeostasis = groupConfigMap[gGrpId].homeoConfig.WithHomeostasis;
3009  groupConfigs[netId][lGrpId].FixedInputWts = grpIt->fixedInputWts;
3010  groupConfigs[netId][lGrpId].hasExternalConnect = grpIt->hasExternalConnect;
3011  groupConfigs[netId][lGrpId].Noffset = grpIt->Noffset; // Note: Noffset is not valid at this time
3012  groupConfigs[netId][lGrpId].MaxDelay = grpIt->maxOutgoingDelay;
3013  groupConfigs[netId][lGrpId].STP_A = groupConfigMap[gGrpId].stpConfig.STP_A;
3014  groupConfigs[netId][lGrpId].STP_U = groupConfigMap[gGrpId].stpConfig.STP_U;
3015  groupConfigs[netId][lGrpId].STP_tau_u_inv = groupConfigMap[gGrpId].stpConfig.STP_tau_u_inv;
3016  groupConfigs[netId][lGrpId].STP_tau_x_inv = groupConfigMap[gGrpId].stpConfig.STP_tau_x_inv;
3017  groupConfigs[netId][lGrpId].TAU_PLUS_INV_EXC = groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_EXC;
3018  groupConfigs[netId][lGrpId].TAU_MINUS_INV_EXC = groupConfigMap[gGrpId].stdpConfig.TAU_MINUS_INV_EXC;
3019  groupConfigs[netId][lGrpId].ALPHA_PLUS_EXC = groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_EXC;
3020  groupConfigs[netId][lGrpId].ALPHA_MINUS_EXC = groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_EXC;
3021  groupConfigs[netId][lGrpId].GAMMA = groupConfigMap[gGrpId].stdpConfig.GAMMA;
3022  groupConfigs[netId][lGrpId].KAPPA = groupConfigMap[gGrpId].stdpConfig.KAPPA;
3023  groupConfigs[netId][lGrpId].OMEGA = groupConfigMap[gGrpId].stdpConfig.OMEGA;
3024  groupConfigs[netId][lGrpId].TAU_PLUS_INV_INB = groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_INB;
3025  groupConfigs[netId][lGrpId].TAU_MINUS_INV_INB = groupConfigMap[gGrpId].stdpConfig.TAU_MINUS_INV_INB;
3026  groupConfigs[netId][lGrpId].ALPHA_PLUS_INB = groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_INB;
3027  groupConfigs[netId][lGrpId].ALPHA_MINUS_INB = groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_INB;
3028  groupConfigs[netId][lGrpId].BETA_LTP = groupConfigMap[gGrpId].stdpConfig.BETA_LTP;
3029  groupConfigs[netId][lGrpId].BETA_LTD = groupConfigMap[gGrpId].stdpConfig.BETA_LTD;
3030  groupConfigs[netId][lGrpId].LAMBDA = groupConfigMap[gGrpId].stdpConfig.LAMBDA;
3031  groupConfigs[netId][lGrpId].DELTA = groupConfigMap[gGrpId].stdpConfig.DELTA;
3032 
3033  groupConfigs[netId][lGrpId].numCompNeighbors = 0;
3034  groupConfigs[netId][lGrpId].withCompartments = groupConfigMap[gGrpId].withCompartments;
3035  groupConfigs[netId][lGrpId].compCouplingUp = groupConfigMap[gGrpId].compCouplingUp;
3036  groupConfigs[netId][lGrpId].compCouplingDown = groupConfigMap[gGrpId].compCouplingDown;
3037  memset(&groupConfigs[netId][lGrpId].compNeighbors, 0, sizeof(groupConfigs[netId][lGrpId].compNeighbors[0])*MAX_NUM_COMP_CONN);
3038  memset(&groupConfigs[netId][lGrpId].compCoupling, 0, sizeof(groupConfigs[netId][lGrpId].compCoupling[0])*MAX_NUM_COMP_CONN);
3039 
3041  groupConfigs[netId][lGrpId].avgTimeScale = groupConfigMap[gGrpId].homeoConfig.avgTimeScale;
3042  groupConfigs[netId][lGrpId].avgTimeScale_decay = groupConfigMap[gGrpId].homeoConfig.avgTimeScaleDecay;
3043  groupConfigs[netId][lGrpId].avgTimeScaleInv = groupConfigMap[gGrpId].homeoConfig.avgTimeScaleInv;
3044  groupConfigs[netId][lGrpId].homeostasisScale = groupConfigMap[gGrpId].homeoConfig.homeostasisScale;
3045 
3046  // parameters of neuromodulator
3047  groupConfigs[netId][lGrpId].baseDP = groupConfigMap[gGrpId].neuromodulatorConfig.baseDP;
3048  groupConfigs[netId][lGrpId].base5HT = groupConfigMap[gGrpId].neuromodulatorConfig.base5HT;
3049  groupConfigs[netId][lGrpId].baseACh = groupConfigMap[gGrpId].neuromodulatorConfig.baseACh;
3050  groupConfigs[netId][lGrpId].baseNE = groupConfigMap[gGrpId].neuromodulatorConfig.baseNE;
3051  groupConfigs[netId][lGrpId].decayDP = groupConfigMap[gGrpId].neuromodulatorConfig.decayDP;
3052  groupConfigs[netId][lGrpId].decay5HT = groupConfigMap[gGrpId].neuromodulatorConfig.decay5HT;
3053  groupConfigs[netId][lGrpId].decayACh = groupConfigMap[gGrpId].neuromodulatorConfig.decayACh;
3054  groupConfigs[netId][lGrpId].decayNE = groupConfigMap[gGrpId].neuromodulatorConfig.decayNE;
3055 
3056  // sync groupConfigs[][] and groupConfigMDMap[]
3057  if (netId == grpIt->netId) {
3058  groupConfigMDMap[gGrpId].netId = grpIt->netId;
3059  groupConfigMDMap[gGrpId].gGrpId = grpIt->gGrpId;
3060  groupConfigMDMap[gGrpId].gStartN = grpIt->gStartN;
3061  groupConfigMDMap[gGrpId].gEndN = grpIt->gEndN;
3062  groupConfigMDMap[gGrpId].lGrpId = grpIt->lGrpId;
3063  groupConfigMDMap[gGrpId].lStartN = grpIt->lStartN;
3064  groupConfigMDMap[gGrpId].lEndN = grpIt->lEndN;
3065  groupConfigMDMap[gGrpId].numPostSynapses = grpIt->numPostSynapses;
3066  groupConfigMDMap[gGrpId].numPreSynapses = grpIt->numPreSynapses;
3067  groupConfigMDMap[gGrpId].LtoGOffset = grpIt->LtoGOffset;
3068  groupConfigMDMap[gGrpId].GtoLOffset = grpIt->GtoLOffset;
3069  groupConfigMDMap[gGrpId].fixedInputWts = grpIt->fixedInputWts;
3070  groupConfigMDMap[gGrpId].hasExternalConnect = grpIt->hasExternalConnect;
3071  groupConfigMDMap[gGrpId].Noffset = grpIt->Noffset; // Note: Noffset is not valid at this time
3072  groupConfigMDMap[gGrpId].maxOutgoingDelay = grpIt->maxOutgoingDelay;
3073  }
3074  groupConfigs[netId][lGrpId].withParamModel_9 = groupConfigMap[gGrpId].withParamModel_9;
3075  groupConfigs[netId][lGrpId].isLIF = groupConfigMap[gGrpId].isLIF;
3076 
3077  }
3078 
3079  // FIXME: How does networkConfigs[netId].numGroups be availabe at this time?! Bug?!
3080  //int numNSpikeGen = 0;
3081  //for(int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
3082  // if (netId == groupConfigs[netId][lGrpId].netId && groupConfigs[netId][lGrpId].isSpikeGenerator && groupConfigs[netId][lGrpId].isSpikeGenFunc) {
3083  // // we only need numNSpikeGen for spike generator callbacks that need to transfer their spikes to the GPU
3084  // groupConfigs[netId][lGrpId].Noffset = numNSpikeGen; // FIXME, Noffset is updated after publish group configs
3085  // numNSpikeGen += groupConfigs[netId][lGrpId].numN;
3086  // }
3087  //}
3088  //assert(numNSpikeGen <= networkConfigs[netId].numNPois);
3089  }
3090 }
3091 
3092 void SNN::generateRuntimeConnectConfigs() {
3093  // sync localConnectLists and connectConfigMap
3094  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
3095  for (std::list<ConnectConfig>::iterator connIt = localConnectLists[netId].begin(); connIt != localConnectLists[netId].end(); connIt++) {
3096  connectConfigMap[connIt->connId] = *connIt;
3097  }
3098 
3099  for (std::list<ConnectConfig>::iterator connIt = externalConnectLists[netId].begin(); connIt != externalConnectLists[netId].end(); connIt++) {
3100  connectConfigMap[connIt->connId] = *connIt;
3101  }
3102  }
3103 }
3104 
// Builds the per-network runtime configuration (networkConfigs[netId]) for
// every local network that has groups assigned: copies the global simulation
// flags, conductance/STDP parameters and integration settings, counts the
// neurons/groups/connections assigned to the network, and finally sizes the
// manager runtime data (managerRTDSize) so the host-side staging buffers can
// hold the data of the largest local network.
void SNN::generateRuntimeNetworkConfigs() {
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			// copy the global network config to local network configs
			// global configuration for maximum axonal delay
			networkConfigs[netId].maxDelay = glbNetworkConfig.maxDelay;

			// configurations for execution features
			networkConfigs[netId].sim_with_fixedwts = sim_with_fixedwts;
			networkConfigs[netId].sim_with_conductances = sim_with_conductances;
			networkConfigs[netId].sim_with_homeostasis = sim_with_homeostasis;
			networkConfigs[netId].sim_with_stdp = sim_with_stdp;
			networkConfigs[netId].sim_with_stp = sim_with_stp;
			networkConfigs[netId].sim_in_testing = sim_in_testing;

			// search for an active neuron monitor among the groups on this network
			networkConfigs[netId].sim_with_nm = false;
			for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
				if (grpIt->netId == netId && grpIt->neuronMonitorId >= 0)
					networkConfigs[netId].sim_with_nm = true;
			}

			// stdp, da-stdp configurations
			networkConfigs[netId].stdpScaleFactor = stdpScaleFactor_;
			networkConfigs[netId].wtChangeDecay = wtChangeDecay_;

			// conductance configurations (decay/rise constants per receptor type)
			networkConfigs[netId].sim_with_NMDA_rise = sim_with_NMDA_rise;
			networkConfigs[netId].sim_with_GABAb_rise = sim_with_GABAb_rise;
			networkConfigs[netId].dAMPA = dAMPA;
			networkConfigs[netId].rNMDA = rNMDA;
			networkConfigs[netId].dNMDA = dNMDA;
			networkConfigs[netId].sNMDA = sNMDA;
			networkConfigs[netId].dGABAa = dGABAa;
			networkConfigs[netId].rGABAb = rGABAb;
			networkConfigs[netId].dGABAb = dGABAb;
			networkConfigs[netId].sGABAb = sGABAb;

			// numerical integration settings
			networkConfigs[netId].simIntegrationMethod = glbNetworkConfig.simIntegrationMethod;
			networkConfigs[netId].simNumStepsPerMs = glbNetworkConfig.simNumStepsPerMs;
			networkConfigs[netId].timeStep = glbNetworkConfig.timeStep;

			// configurations for boundaries of neural types
			findNumN(netId, networkConfigs[netId].numN, networkConfigs[netId].numNExternal, networkConfigs[netId].numNAssigned,
				networkConfigs[netId].numNReg, networkConfigs[netId].numNExcReg, networkConfigs[netId].numNInhReg,
				networkConfigs[netId].numNPois, networkConfigs[netId].numNExcPois, networkConfigs[netId].numNInhPois);

			// configurations for assigned groups and connections
			// numGroups counts only groups owned by this network; numGroupsAssigned
			// also includes groups replicated here because of external connections
			networkConfigs[netId].numGroups = 0;
			for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
				if (grpIt->netId == netId)
					networkConfigs[netId].numGroups++;
			}
			networkConfigs[netId].numGroupsAssigned = groupPartitionLists[netId].size();
			//networkConfigs[netId].numConnections = localConnectLists[netId].size();
			//networkConfigs[netId].numAssignedConnections = localConnectLists[netId].size() + externalConnectLists[netId].size();
			//networkConfigs[netId].numConnections = localConnectLists[netId].size() + externalConnectLists[netId].size();
			networkConfigs[netId].numConnections = connectConfigMap.size(); // temporary solution: copy all connection info to each GPU

			// find the maximum number of pre- and post-connections among neurons
			// SNN::maxNumPreSynN and SNN::maxNumPostSynN are updated
			findMaxNumSynapsesNeurons(netId, networkConfigs[netId].maxNumPostSynN, networkConfigs[netId].maxNumPreSynN);

			// find the maximum number of spikes in D1 (i.e., maxDelay == 1) and D2 (i.e., maxDelay >= 2) sets
			findMaxSpikesD1D2(netId, networkConfigs[netId].maxSpikesD1, networkConfigs[netId].maxSpikesD2);

			// find the total number of synapses in the network
			findNumSynapsesNetwork(netId, networkConfigs[netId].numPostSynNet, networkConfigs[netId].numPreSynNet);

			// find out number of user-defined spike gen and update Noffset of each group config
			// Note: groupConfigs[][].Noffset is valid at this time
			findNumNSpikeGenAndOffset(netId);
		}
	}

	// find manager runtime data size, which is sufficient to hold the data of any gpu runtime
	memset(&managerRTDSize, 0, sizeof(ManagerRuntimeDataSize));
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			// find the maximum number of numN, numNReg, and numNAssigned among local networks
			if (networkConfigs[netId].numNReg > managerRTDSize.maxNumNReg) managerRTDSize.maxNumNReg = networkConfigs[netId].numNReg;
			if (networkConfigs[netId].numN > managerRTDSize.maxNumN) managerRTDSize.maxNumN = networkConfigs[netId].numN;
			if (networkConfigs[netId].numNAssigned > managerRTDSize.maxNumNAssigned) managerRTDSize.maxNumNAssigned = networkConfigs[netId].numNAssigned;

			// find the maximum number of numNSpikeGen among local networks
			if (networkConfigs[netId].numNSpikeGen > managerRTDSize.maxNumNSpikeGen) managerRTDSize.maxNumNSpikeGen = networkConfigs[netId].numNSpikeGen;

			// find the maximum number of numGroups and numConnections among local networks
			if (networkConfigs[netId].numGroups > managerRTDSize.maxNumGroups) managerRTDSize.maxNumGroups = networkConfigs[netId].numGroups;
			if (networkConfigs[netId].numConnections > managerRTDSize.maxNumConnections) managerRTDSize.maxNumConnections = networkConfigs[netId].numConnections;

			// find the maximum number of neurons in a group among local networks
			for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
				if (groupConfigMap[grpIt->gGrpId].numN > managerRTDSize.maxNumNPerGroup) managerRTDSize.maxNumNPerGroup = groupConfigMap[grpIt->gGrpId].numN;
			}

			// find the maximum number of maxSpikesD1(D2) among networks
			if (networkConfigs[netId].maxSpikesD1 > managerRTDSize.maxMaxSpikeD1) managerRTDSize.maxMaxSpikeD1 = networkConfigs[netId].maxSpikesD1;
			if (networkConfigs[netId].maxSpikesD2 > managerRTDSize.maxMaxSpikeD2) managerRTDSize.maxMaxSpikeD2 = networkConfigs[netId].maxSpikesD2;

			// find the maximum number of total # of pre- and post-connections among local networks
			if (networkConfigs[netId].numPreSynNet > managerRTDSize.maxNumPreSynNet) managerRTDSize.maxNumPreSynNet = networkConfigs[netId].numPreSynNet;
			if (networkConfigs[netId].numPostSynNet > managerRTDSize.maxNumPostSynNet) managerRTDSize.maxNumPostSynNet = networkConfigs[netId].numPostSynNet;

			// find the number of numN, and numNReg in the global network (sums, not maxima)
			managerRTDSize.glbNumN += networkConfigs[netId].numN;
			managerRTDSize.glbNumNReg += networkConfigs[netId].numNReg;
		}
	}
}
3215 
3216 bool compareSrcNeuron(const ConnectionInfo& first, const ConnectionInfo& second) {
3217  return (first.nSrc + first.srcGLoffset < second.nSrc + second.srcGLoffset);
3218 }
3219 
3220 bool compareDelay(const ConnectionInfo& first, const ConnectionInfo& second) {
3221  return (first.delay < second.delay);
3222 }
3223 
3224 // Note: ConnectInfo stored in connectionList use global ids
3225 void SNN::generateConnectionRuntime(int netId) {
3226  std::map<int, int> GLoffset; // global nId to local nId offset
3227  std::map<int, int> GLgrpId; // global grpId to local grpId offset
3228 
3229  // load offset between global neuron id and local neuron id
3230  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
3231  GLoffset[grpIt->gGrpId] = grpIt->GtoLOffset;
3232  GLgrpId[grpIt->gGrpId] = grpIt->lGrpId;
3233  }
3234  // FIXME: connId is global connId, use connectConfigs[netId][local connId] instead,
3235  // FIXME; but note connectConfigs[netId][] are NOT complete, lack of exeternal incoming connections
3236  // generate mulSynFast, mulSynSlow in connection-centric array
3237  for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
3238  // store scaling factors for synaptic currents in connection-centric array
3239  mulSynFast[connIt->second.connId] = connIt->second.mulSynFast;
3240  mulSynSlow[connIt->second.connId] = connIt->second.mulSynSlow;
3241  }
3242 
3243  // parse ConnectionInfo stored in connectionLists[0]
3244  // note: ConnectInfo stored in connectionList use global ids
3245  // generate Npost, Npre, Npre_plastic
3246  int parsedConnections = 0;
3247  memset(managerRuntimeData.Npost, 0, sizeof(short) * networkConfigs[netId].numNAssigned);
3248  memset(managerRuntimeData.Npre, 0, sizeof(short) * networkConfigs[netId].numNAssigned);
3249  memset(managerRuntimeData.Npre_plastic, 0, sizeof(short) * networkConfigs[netId].numNAssigned);
3250  for (std::list<ConnectionInfo>::iterator connIt = connectionLists[netId].begin(); connIt != connectionLists[netId].end(); connIt++) {
3251  connIt->srcGLoffset = GLoffset[connIt->grpSrc];
3252  if (managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]] == SYNAPSE_ID_MASK) {
3253  KERNEL_ERROR("Error: the number of synapses exceeds maximum limit (%d) for neuron %d (group %d)", SYNAPSE_ID_MASK, connIt->nSrc, connIt->grpSrc);
3255  }
3256  if (managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]] == SYNAPSE_ID_MASK) {
3257  KERNEL_ERROR("Error: the number of synapses exceeds maximum limit (%d) for neuron %d (group %d)", SYNAPSE_ID_MASK, connIt->nDest, connIt->grpDest);
3259  }
3260  managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]]++;
3261  managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]]++;
3262 
3263  if (GET_FIXED_PLASTIC(connectConfigMap[connIt->connId].connProp) == SYN_PLASTIC) {
3264  sim_with_fixedwts = false; // if network has any plastic synapses at all, this will be set to true
3265  managerRuntimeData.Npre_plastic[connIt->nDest + GLoffset[connIt->grpDest]]++;
3266 
3267  // homeostasis
3268  if (groupConfigMap[connIt->grpDest].homeoConfig.WithHomeostasis && groupConfigMDMap[connIt->grpDest].homeoId == -1)
3269  groupConfigMDMap[connIt->grpDest].homeoId = connIt->nDest + GLoffset[connIt->grpDest]; // this neuron info will be printed
3270 
3271  // old access to homeostasis
3272  //if (groupConfigs[netId][GLgrpId[it->grpDest]].WithHomeostasis && groupConfigs[netId][GLgrpId[it->grpDest]].homeoId == -1)
3273  // groupConfigs[netId][GLgrpId[it->grpDest]].homeoId = it->nDest + GLoffset[it->grpDest]; // this neuron info will be printed
3274  }
3275 
3276  // generate the delay vaule
3277  //it->delay = connectConfigMap[it->connId].minDelay + rand() % (connectConfigMap[it->connId].maxDelay - connectConfigMap[it->connId].minDelay + 1);
3278  //assert((it->delay >= connectConfigMap[it->connId].minDelay) && (it->delay <= connectConfigMap[it->connId].maxDelay));
3279  // generate the max weight and initial weight
3280  //float initWt = generateWeight(connectConfigMap[it->connId].connProp, connectConfigMap[it->connId].initWt, connectConfigMap[it->connId].maxWt, it->nSrc, it->grpSrc);
3281  //float initWt = connectConfigMap[it->connId].initWt;
3282  //float maxWt = connectConfigMap[it->connId].maxWt;
3283  // adjust sign of weight based on pre-group (negative if pre is inhibitory)
3284  // this access is fine, isExcitatoryGroup() use global grpId
3285  //it->maxWt = isExcitatoryGroup(it->grpSrc) ? fabs(maxWt) : -1.0 * fabs(maxWt);
3286  //it->initWt = isExcitatoryGroup(it->grpSrc) ? fabs(initWt) : -1.0 * fabs(initWt);
3287 
3288  parsedConnections++;
3289  }
3290  assert(parsedConnections == networkConfigs[netId].numPostSynNet && parsedConnections == networkConfigs[netId].numPreSynNet);
3291 
3292  // generate cumulativePost and cumulativePre
3293  managerRuntimeData.cumulativePost[0] = 0;
3294  managerRuntimeData.cumulativePre[0] = 0;
3295  for (int lNId = 1; lNId < networkConfigs[netId].numNAssigned; lNId++) {
3296  managerRuntimeData.cumulativePost[lNId] = managerRuntimeData.cumulativePost[lNId - 1] + managerRuntimeData.Npost[lNId - 1];
3297  managerRuntimeData.cumulativePre[lNId] = managerRuntimeData.cumulativePre[lNId - 1] + managerRuntimeData.Npre[lNId - 1];
3298  }
3299 
3300  // generate preSynapticIds, parse plastic connections first
3301  memset(managerRuntimeData.Npre, 0, sizeof(short) * networkConfigs[netId].numNAssigned); // reset managerRuntimeData.Npre to zero, so that it can be used as synId
3302  parsedConnections = 0;
3303  for (std::list<ConnectionInfo>::iterator connIt = connectionLists[netId].begin(); connIt != connectionLists[netId].end(); connIt++) {
3304  if (GET_FIXED_PLASTIC(connectConfigMap[connIt->connId].connProp) == SYN_PLASTIC) {
3305  int pre_pos = managerRuntimeData.cumulativePre[connIt->nDest + GLoffset[connIt->grpDest]] + managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]];
3306  assert(pre_pos < networkConfigs[netId].numPreSynNet);
3307 
3308  managerRuntimeData.preSynapticIds[pre_pos] = SET_CONN_ID((connIt->nSrc + GLoffset[connIt->grpSrc]), 0, (GLgrpId[connIt->grpSrc])); // managerRuntimeData.Npost[it->nSrc] is not availabe at this parse
3309  connIt->preSynId = managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]]; // save managerRuntimeData.Npre[it->nDest] as synId
3310 
3311  managerRuntimeData.Npre[connIt->nDest+ GLoffset[connIt->grpDest]]++;
3312  parsedConnections++;
3313 
3314  // update the maximum number of and pre-connections of a neuron in a group
3315  //if (managerRuntimeData.Npre[it->nDest] > groupInfo[it->grpDest].maxPreConn)
3316  // groupInfo[it->grpDest].maxPreConn = managerRuntimeData.Npre[it->nDest];
3317  }
3318  }
3319  // parse fixed connections
3320  for (std::list<ConnectionInfo>::iterator connIt = connectionLists[netId].begin(); connIt != connectionLists[netId].end(); connIt++) {
3321  if (GET_FIXED_PLASTIC(connectConfigMap[connIt->connId].connProp) == SYN_FIXED) {
3322  int pre_pos = managerRuntimeData.cumulativePre[connIt->nDest + GLoffset[connIt->grpDest]] + managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]];
3323  assert(pre_pos < networkConfigs[netId].numPreSynNet);
3324 
3325  managerRuntimeData.preSynapticIds[pre_pos] = SET_CONN_ID((connIt->nSrc + GLoffset[connIt->grpSrc]), 0, (GLgrpId[connIt->grpSrc])); // managerRuntimeData.Npost[it->nSrc] is not availabe at this parse
3326  connIt->preSynId = managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]]; // save managerRuntimeData.Npre[it->nDest] as synId
3327 
3328  managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]]++;
3329  parsedConnections++;
3330 
3331  // update the maximum number of and pre-connections of a neuron in a group
3332  //if (managerRuntimeData.Npre[it->nDest] > groupInfo[it->grpDest].maxPreConn)
3333  // groupInfo[it->grpDest].maxPreConn = managerRuntimeData.Npre[it->nDest];
3334  }
3335  }
3336  assert(parsedConnections == networkConfigs[netId].numPreSynNet);
3337  //printf("parsed pre connections %d\n", parsedConnections);
3338 
3339  // generate postSynapticIds
3340  connectionLists[netId].sort(compareSrcNeuron); // sort by local nSrc id
3341  memset(managerRuntimeData.postDelayInfo, 0, sizeof(DelayInfo) * (networkConfigs[netId].numNAssigned * (glbNetworkConfig.maxDelay + 1)));
3342  for (int lNId = 0; lNId < networkConfigs[netId].numNAssigned; lNId++) { // pre-neuron order, local nId
3343  if (managerRuntimeData.Npost[lNId] > 0) {
3344  std::list<ConnectionInfo> postConnectionList;
3345  ConnectionInfo targetConn;
3346  targetConn.nSrc = lNId ; // the other fields does not matter, use local nid to search
3347 
3348  std::list<ConnectionInfo>::iterator firstPostConn = std::find(connectionLists[netId].begin(), connectionLists[netId].end(), targetConn);
3349  std::list<ConnectionInfo>::iterator lastPostConn = firstPostConn;
3350  std::advance(lastPostConn, managerRuntimeData.Npost[lNId]);
3351  managerRuntimeData.Npost[lNId] = 0; // reset managerRuntimeData.Npost[lNId] to zero, so that it can be used as synId
3352 
3353  postConnectionList.splice(postConnectionList.begin(), connectionLists[netId], firstPostConn, lastPostConn);
3354  postConnectionList.sort(compareDelay);
3355 
3356  int post_pos, pre_pos, lastDelay = 0;
3357  parsedConnections = 0;
3358  //memset(&managerRuntimeData.postDelayInfo[lNId * (glbNetworkConfig.maxDelay + 1)], 0, sizeof(DelayInfo) * (glbNetworkConfig.maxDelay + 1));
3359  for (std::list<ConnectionInfo>::iterator connIt = postConnectionList.begin(); connIt != postConnectionList.end(); connIt++) {
3360  assert(connIt->nSrc + GLoffset[connIt->grpSrc] == lNId);
3361  post_pos = managerRuntimeData.cumulativePost[connIt->nSrc + GLoffset[connIt->grpSrc]] + managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]];
3362  pre_pos = managerRuntimeData.cumulativePre[connIt->nDest + GLoffset[connIt->grpDest]] + connIt->preSynId;
3363 
3364  assert(post_pos < networkConfigs[netId].numPostSynNet);
3365  //assert(pre_pos < numPreSynNet);
3366 
3367  // generate a post synaptic id for the current connection
3368  managerRuntimeData.postSynapticIds[post_pos] = SET_CONN_ID((connIt->nDest + GLoffset[connIt->grpDest]), connIt->preSynId, (GLgrpId[connIt->grpDest]));// used stored managerRuntimeData.Npre[it->nDest] in it->preSynId
3369  // generate a delay look up table by the way
3370  assert(connIt->delay > 0);
3371  if (connIt->delay > lastDelay) {
3372  managerRuntimeData.postDelayInfo[lNId * (glbNetworkConfig.maxDelay + 1) + connIt->delay - 1].delay_index_start = managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]];
3373  managerRuntimeData.postDelayInfo[lNId * (glbNetworkConfig.maxDelay + 1) + connIt->delay - 1].delay_length++;
3374  } else if (connIt->delay == lastDelay) {
3375  managerRuntimeData.postDelayInfo[lNId * (glbNetworkConfig.maxDelay + 1) + connIt->delay - 1].delay_length++;
3376  } else {
3377  KERNEL_ERROR("Post-synaptic delays not sorted correctly... pre_id=%d, delay[%d]=%d, delay[%d]=%d",
3378  lNId, managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]], connIt->delay, managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]] - 1, lastDelay);
3379  }
3380  lastDelay = connIt->delay;
3381 
3382  // update the corresponding pre synaptic id
3383  SynInfo preId = managerRuntimeData.preSynapticIds[pre_pos];
3384  assert(GET_CONN_NEURON_ID(preId) == connIt->nSrc + GLoffset[connIt->grpSrc]);
3385  //assert(GET_CONN_GRP_ID(preId) == it->grpSrc);
3386  managerRuntimeData.preSynapticIds[pre_pos] = SET_CONN_ID((connIt->nSrc + GLoffset[connIt->grpSrc]), managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]], (GLgrpId[connIt->grpSrc]));
3387  managerRuntimeData.wt[pre_pos] = connIt->initWt;
3388  managerRuntimeData.maxSynWt[pre_pos] = connIt->maxWt;
3389  managerRuntimeData.connIdsPreIdx[pre_pos] = connIt->connId;
3390 
3391  managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]]++;
3392  parsedConnections++;
3393 
3394  // update the maximum number of and post-connections of a neuron in a group
3395  //if (managerRuntimeData.Npost[it->nSrc] > groupInfo[it->grpSrc].maxPostConn)
3396  // groupInfo[it->grpSrc].maxPostConn = managerRuntimeData.Npost[it->nSrc];
3397  }
3398  assert(parsedConnections == managerRuntimeData.Npost[lNId]);
3399  //printf("parsed post connections %d\n", parsedConnections);
3400  // note: elements in postConnectionList are deallocated automatically with postConnectionList
3401  /* for postDelayInfo debugging
3402  printf("%d ", lNId);
3403  for (int t = 0; t < maxDelay_ + 1; t ++) {
3404  printf("[%d,%d]",
3405  managerRuntimeData.postDelayInfo[lNId * (maxDelay_ + 1) + t].delay_index_start,
3406  managerRuntimeData.postDelayInfo[lNId * (maxDelay_ + 1) + t].delay_length);
3407  }
3408  printf("\n");
3409  */
3410  }
3411  }
3412  assert(connectionLists[netId].empty());
3413 
3414  //int p = managerRuntimeData.Npost[src];
3415 
3416  //assert(managerRuntimeData.Npost[src] >= 0);
3417  //assert(managerRuntimeData.Npre[dest] >= 0);
3418  //assert((src * maxNumPostSynGrp + p) / numN < maxNumPostSynGrp); // divide by numN to prevent INT overflow
3419 
3420  //unsigned int post_pos = managerRuntimeData.cumulativePost[src] + managerRuntimeData.Npost[src];
3421  //unsigned int pre_pos = managerRuntimeData.cumulativePre[dest] + managerRuntimeData.Npre[dest];
3422 
3423  //assert(post_pos < numPostSynNet);
3424  //assert(pre_pos < numPreSynNet);
3425 
3427  //managerRuntimeData.postSynapticIds[post_pos] = SET_CONN_ID(dest, managerRuntimeData.Npre[dest], destGrp);
3428  //tmp_SynapticDelay[post_pos] = dVal;
3429 
3430  //managerRuntimeData.preSynapticIds[pre_pos] = SET_CONN_ID(src, managerRuntimeData.Npost[src], srcGrp);
3431  //managerRuntimeData.wt[pre_pos] = synWt;
3432  //managerRuntimeData.maxSynWt[pre_pos] = maxWt;
3433  //managerRuntimeData.connIdsPreIdx[pre_pos] = connId;
3434 
3435  //bool synWtType = GET_FIXED_PLASTIC(connProp);
3436 
3437  //if (synWtType == SYN_PLASTIC) {
3438  // sim_with_fixedwts = false; // if network has any plastic synapses at all, this will be set to true
3439  // managerRuntimeData.Npre_plastic[dest]++;
3440  // // homeostasis
3441  // if (groupConfigs[0][destGrp].WithHomeostasis && groupConfigs[0][destGrp].homeoId ==-1)
3442  // groupConfigs[0][destGrp].homeoId = dest; // this neuron info will be printed
3443  //}
3444 
3445  //managerRuntimeData.Npre[dest] += 1;
3446  //managerRuntimeData.Npost[src] += 1;
3447 
3448  //groupInfo[srcGrp].numPostConn++;
3449  //groupInfo[destGrp].numPreConn++;
3450 
3452  //if (managerRuntimeData.Npost[src] > groupInfo[srcGrp].maxPostConn)
3453  // groupInfo[srcGrp].maxPostConn = managerRuntimeData.Npost[src];
3454  //if (managerRuntimeData.Npre[dest] > groupInfo[destGrp].maxPreConn)
3455  // groupInfo[destGrp].maxPreConn = managerRuntimeData.Npre[src];
3456 }
3457 
3458 void SNN::generateCompConnectionRuntime(int netId)
3459 {
3460  std::map<int, int> GLgrpId; // global grpId to local grpId offset
3461 
3462  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
3463  GLgrpId[grpIt->gGrpId] = grpIt->lGrpId;
3464  //printf("Global group id %i; Local group id %i\n", grpIt->gGrpId, grpIt->lGrpId);
3465  }
3466 
3467  //printf("The current netid is: %i\n", netId);
3468 
3469  for (std::list<compConnectConfig>::iterator connIt = localCompConnectLists[netId].begin(); connIt != localCompConnectLists[netId].end(); connIt++) {
3470  //printf("The size of localCompConnectLists is: %i\n", localCompConnectLists[netId].size());
3471  int grpLower = connIt->grpSrc;
3472  int grpUpper = connIt->grpDest;
3473 
3474  int i = groupConfigs[netId][GLgrpId[grpLower]].numCompNeighbors;
3475  if (i >= MAX_NUM_COMP_CONN) {
3476  KERNEL_ERROR("Group %s(%d) exceeds max number of allowed compartmental connections (%d).",
3477  groupConfigMap[grpLower].grpName.c_str(), grpLower, (int)MAX_NUM_COMP_CONN);
3478  exitSimulation(1);
3479  }
3480  groupConfigs[netId][GLgrpId[grpLower]].compNeighbors[i] = grpUpper;
3481  groupConfigs[netId][GLgrpId[grpLower]].compCoupling[i] = groupConfigs[netId][GLgrpId[grpUpper]].compCouplingDown; // get down-coupling from upper neighbor
3482  groupConfigs[netId][GLgrpId[grpLower]].numCompNeighbors++;
3483 
3484  int j = groupConfigs[netId][GLgrpId[grpUpper]].numCompNeighbors;
3485  if (j >= MAX_NUM_COMP_CONN) {
3486  KERNEL_ERROR("Group %s(%d) exceeds max number of allowed compartmental connections (%d).",
3487  groupConfigMap[grpUpper].grpName.c_str(), grpUpper, (int)MAX_NUM_COMP_CONN);
3488  exitSimulation(1);
3489  }
3490  groupConfigs[netId][GLgrpId[grpUpper]].compNeighbors[j] = grpLower;
3491  groupConfigs[netId][GLgrpId[grpUpper]].compCoupling[j] = groupConfigs[netId][GLgrpId[grpLower]].compCouplingUp; // get up-coupling from lower neighbor
3492  groupConfigs[netId][GLgrpId[grpUpper]].numCompNeighbors++;
3493 
3494  //printf("Group %i (local group %i) has %i compartmental neighbors!\n", grpUpper, GLgrpId[grpUpper], groupConfigs[netId][GLgrpId[grpUpper]].numCompNeighbors);
3495  }
3496 }
3497 
3498 
3499 void SNN::generatePoissonGroupRuntime(int netId, int lGrpId) {
3500  for(int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++)
3501  resetPoissonNeuron(netId, lGrpId, lNId);
3502 }
3503 
3504 
3505 void SNN::collectGlobalNetworkConfigC() {
3506  // scan all connect configs to find the maximum delay in the global network, update glbNetworkConfig.maxDelay
3507  for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
3508  if (connIt->second.maxDelay > glbNetworkConfig.maxDelay)
3509  glbNetworkConfig.maxDelay = connIt->second.maxDelay;
3510  }
3511  assert(connectConfigMap.size() > 0 || glbNetworkConfig.maxDelay != -1);
3512 
3513  // scan all group configs to find the number of (reg, pois, exc, inh) neuron in the global network
3514  for(int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
3515  if (IS_EXCITATORY_TYPE(groupConfigMap[gGrpId].type) && (groupConfigMap[gGrpId].type & POISSON_NEURON)) {
3516  glbNetworkConfig.numNExcPois += groupConfigMap[gGrpId].numN;
3517  } else if (IS_INHIBITORY_TYPE(groupConfigMap[gGrpId].type) && (groupConfigMap[gGrpId].type & POISSON_NEURON)) {
3518  glbNetworkConfig.numNInhPois += groupConfigMap[gGrpId].numN;
3519  } else if (IS_EXCITATORY_TYPE(groupConfigMap[gGrpId].type) && !(groupConfigMap[gGrpId].type & POISSON_NEURON)) {
3520  glbNetworkConfig.numNExcReg += groupConfigMap[gGrpId].numN;
3521  } else if (IS_INHIBITORY_TYPE(groupConfigMap[gGrpId].type) && !(groupConfigMap[gGrpId].type & POISSON_NEURON)) {
3522  glbNetworkConfig.numNInhReg += groupConfigMap[gGrpId].numN;
3523  }
3524 
3525  if (groupConfigMDMap[gGrpId].maxOutgoingDelay == 1)
3526  glbNetworkConfig.numN1msDelay += groupConfigMap[gGrpId].numN;
3527  else if (groupConfigMDMap[gGrpId].maxOutgoingDelay >= 2)
3528  glbNetworkConfig.numN2msDelay += groupConfigMap[gGrpId].numN;
3529  }
3530 
3531  glbNetworkConfig.numNReg = glbNetworkConfig.numNExcReg + glbNetworkConfig.numNInhReg;
3532  glbNetworkConfig.numNPois = glbNetworkConfig.numNExcPois + glbNetworkConfig.numNInhPois;
3533  glbNetworkConfig.numN = glbNetworkConfig.numNReg + glbNetworkConfig.numNPois;
3534 }
3535 
3536 
3537 void SNN::collectGlobalNetworkConfigP() {
3538  // print group and connection overview
3539  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
3540  if (!localConnectLists[netId].empty() || !externalConnectLists[netId].empty()) {
3541  for (std::list<ConnectConfig>::iterator connIt = localConnectLists[netId].begin(); connIt != localConnectLists[netId].end(); connIt++)
3542  glbNetworkConfig.numSynNet += connIt->numberOfConnections;
3543 
3544  for (std::list<ConnectConfig>::iterator connIt = externalConnectLists[netId].begin(); connIt != externalConnectLists[netId].end(); connIt++)
3545  glbNetworkConfig.numSynNet += connIt->numberOfConnections;
3546  }
3547  }
3548 }
3549 
3550 // after all the initalization. Its time to create the synaptic weights, weight change and also
3551 // time of firing these are the mostly costly arrays so dense packing is essential to minimize wastage of space
3552 void SNN::compileSNN() {
3553  KERNEL_DEBUG("Beginning compilation of the network....");
3554 
3555  // compile (update) group and connection configs according to their mutual information
3556  // update GroupConfig::MaxDelay GroupConfig::FixedInputWts
3557  // assign GroupConfig::StartN and GroupConfig::EndN
3558  // Note: MaxDelay, FixedInputWts, StartN, and EndN are invariant in single-GPU or multi-GPUs mode
3559  compileGroupConfig();
3560 
3561  compileConnectConfig(); // for future use
3562 
3563  // collect the global network config according to compiled gorup and connection configs
3564  // collect SNN::maxDelay_
3565  collectGlobalNetworkConfigC();
3566 
3567  // perform various consistency checks:
3568  // - numNeurons vs. sum of all neurons
3569  // - STDP set on a post-group with incoming plastic connections
3570  // - etc.
3571  verifyNetwork();
3572 
3573  // display the global network configuration
3574  KERNEL_INFO("\n");
3575  KERNEL_INFO("************************** Global Network Configuration *******************************");
3576  KERNEL_INFO("The number of neurons in the network (numN) = %d", glbNetworkConfig.numN);
3577  KERNEL_INFO("The number of regular neurons in the network (numNReg:numNExcReg:numNInhReg) = %d:%d:%d", glbNetworkConfig.numNReg, glbNetworkConfig.numNExcReg, glbNetworkConfig.numNInhReg);
3578  KERNEL_INFO("The number of poisson neurons in the network (numNPois:numNExcPois:numInhPois) = %d:%d:%d", glbNetworkConfig.numNPois, glbNetworkConfig.numNExcPois, glbNetworkConfig.numNInhPois);
3579  KERNEL_INFO("The maximum axonal delay in the network (maxDelay) = %d", glbNetworkConfig.maxDelay);
3580 
3581  //ensure that we dont compile the network again
3582  snnState = COMPILED_SNN;
3583 }
3584 
void SNN::compileConnectConfig() {
	// Intentionally a no-op: placeholder reserved for future
	// connection-config compilation steps.
	// for future use
}
3588 
3589 void SNN::compileGroupConfig() {
3590  int grpSrc;
3591  bool synWtType;
3592 
3593  // find the maximum delay for each group according to incoming connection
3594  for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
3595  // check if the current connection's delay meaning grpSrc's delay
3596  // is greater than the MaxDelay for grpSrc. We find the maximum
3597  // delay for the grpSrc by this scheme.
3598  grpSrc = connIt->second.grpSrc;
3599  if (connIt->second.maxDelay > groupConfigMDMap[grpSrc].maxOutgoingDelay)
3600  groupConfigMDMap[grpSrc].maxOutgoingDelay = connIt->second.maxDelay;
3601 
3602  // given group has plastic connection, and we need to apply STDP rule...
3603  synWtType = GET_FIXED_PLASTIC(connIt->second.connProp);
3604  if (synWtType == SYN_PLASTIC) {
3605  groupConfigMDMap[connIt->second.grpDest].fixedInputWts = false;
3606  }
3607  }
3608 
3609  // assigned global neruon ids to each group in the order...
3610  // !!!!!!! IMPORTANT : NEURON ORGANIZATION/ARRANGEMENT MAP !!!!!!!!!!
3611  // <--- Excitatory --> | <-------- Inhibitory REGION ----------> | <-- Excitatory -->
3612  // Excitatory-Regular | Inhibitory-Regular | Inhibitory-Poisson | Excitatory-Poisson
3613  int assignedGroup = 0;
3614  int availableNeuronId = 0;
3615  for(int order = 0; order < 4; order++) {
3616  for(int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
3617  if (IS_EXCITATORY_TYPE(groupConfigMap[gGrpId].type) && (groupConfigMap[gGrpId].type & POISSON_NEURON) && order == 3) {
3618  availableNeuronId = assignGroup(gGrpId, availableNeuronId);
3619  assignedGroup++;
3620  } else if (IS_INHIBITORY_TYPE(groupConfigMap[gGrpId].type) && (groupConfigMap[gGrpId].type & POISSON_NEURON) && order == 2) {
3621  availableNeuronId = assignGroup(gGrpId, availableNeuronId);
3622  assignedGroup++;
3623  } else if (IS_EXCITATORY_TYPE(groupConfigMap[gGrpId].type) && !(groupConfigMap[gGrpId].type & POISSON_NEURON) && order == 0) {
3624  availableNeuronId = assignGroup(gGrpId, availableNeuronId);
3625  assignedGroup++;
3626  } else if (IS_INHIBITORY_TYPE(groupConfigMap[gGrpId].type) && !(groupConfigMap[gGrpId].type & POISSON_NEURON) && order == 1) {
3627  availableNeuronId = assignGroup(gGrpId, availableNeuronId);
3628  assignedGroup++;
3629  }
3630  }
3631  }
3632  //assert(availableNeuronId == numN);
3633  assert(assignedGroup == numGroups);
3634 }
3635 
3636 void SNN::connectNetwork() {
3637  // this parse generates local connections
3638  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
3639  for (std::list<ConnectConfig>::iterator connIt = localConnectLists[netId].begin(); connIt != localConnectLists[netId].end(); connIt++) {
3640  switch(connIt->type) {
3641  case CONN_RANDOM:
3642  connectRandom(netId, connIt, false);
3643  break;
3644  case CONN_FULL:
3645  connectFull(netId, connIt, false);
3646  break;
3647  case CONN_FULL_NO_DIRECT:
3648  connectFull(netId, connIt, false);
3649  break;
3650  case CONN_ONE_TO_ONE:
3651  connectOneToOne(netId, connIt, false);
3652  break;
3653  case CONN_GAUSSIAN:
3654  connectGaussian(netId, connIt, false);
3655  break;
3656  case CONN_USER_DEFINED:
3657  connectUserDefined(netId, connIt, false);
3658  break;
3659  default:
3660  KERNEL_ERROR("Invalid connection type( should be 'random', 'full', 'full-no-direct', or 'one-to-one')");
3661  exitSimulation(-1);
3662  }
3663  }
3664  }
3665 
3666  // this parse generates external connections
3667  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
3668  for (std::list<ConnectConfig>::iterator connIt = externalConnectLists[netId].begin(); connIt != externalConnectLists[netId].end(); connIt++) {
3669  switch(connIt->type) {
3670  case CONN_RANDOM:
3671  connectRandom(netId, connIt, true);
3672  break;
3673  case CONN_FULL:
3674  connectFull(netId, connIt, true);
3675  break;
3676  case CONN_FULL_NO_DIRECT:
3677  connectFull(netId, connIt, true);
3678  break;
3679  case CONN_ONE_TO_ONE:
3680  connectOneToOne(netId, connIt, true);
3681  break;
3682  case CONN_GAUSSIAN:
3683  connectGaussian(netId, connIt, true);
3684  break;
3685  case CONN_USER_DEFINED:
3686  connectUserDefined(netId, connIt, true);
3687  break;
3688  default:
3689  KERNEL_ERROR("Invalid connection type( should be 'random', 'full', 'full-no-direct', or 'one-to-one')");
3690  exitSimulation(-1);
3691  }
3692  }
3693  }
3694 }
3695 
3697 inline void SNN::connectNeurons(int netId, int _grpSrc, int _grpDest, int _nSrc, int _nDest, short int _connId, int externalNetId) {
3698  //assert(destN <= CONN_SYN_NEURON_MASK); // total number of neurons is less than 1 million within a GPU
3699  ConnectionInfo connInfo;
3700  connInfo.grpSrc = _grpSrc;
3701  connInfo.grpDest = _grpDest;
3702  connInfo.nSrc = _nSrc;
3703  connInfo.nDest = _nDest;
3704  connInfo.srcGLoffset = 0;
3705  connInfo.connId = _connId;
3706  connInfo.preSynId = -1;
3707  connInfo.initWt = 0.0f;
3708  connInfo.maxWt = 0.0f;
3709  connInfo.delay = 0;
3710 
3711  // generate the delay vaule
3712  connInfo.delay = connectConfigMap[_connId].minDelay + rand() % (connectConfigMap[_connId].maxDelay - connectConfigMap[_connId].minDelay + 1);
3713  assert((connInfo.delay >= connectConfigMap[_connId].minDelay) && (connInfo.delay <= connectConfigMap[_connId].maxDelay));
3714  // generate the max weight and initial weight
3715  //float initWt = generateWeight(connectConfigMap[it->connId].connProp, connectConfigMap[it->connId].initWt, connectConfigMap[it->connId].maxWt, it->nSrc, it->grpSrc);
3716  float initWt = connectConfigMap[_connId].initWt;
3717  float maxWt = connectConfigMap[_connId].maxWt;
3718  // adjust sign of weight based on pre-group (negative if pre is inhibitory)
3719  // this access is fine, isExcitatoryGroup() use global grpId
3720  connInfo.maxWt = isExcitatoryGroup(_grpSrc) ? fabs(maxWt) : -1.0 * fabs(maxWt);
3721  connInfo.initWt = isExcitatoryGroup(_grpSrc) ? fabs(initWt) : -1.0 * fabs(initWt);
3722 
3723  connectionLists[netId].push_back(connInfo);
3724 
3725  // If the connection is external, copy the connection info to the external network
3726  if (externalNetId >= 0)
3727  connectionLists[externalNetId].push_back(connInfo);
3728 }
3729 
3731 inline void SNN::connectNeurons(int netId, int _grpSrc, int _grpDest, int _nSrc, int _nDest, short int _connId, float initWt, float maxWt, uint8_t delay, int externalNetId) {
3732  //assert(destN <= CONN_SYN_NEURON_MASK); // total number of neurons is less than 1 million within a GPU
3733  ConnectionInfo connInfo;
3734  connInfo.grpSrc = _grpSrc;
3735  connInfo.grpDest = _grpDest;
3736  connInfo.nSrc = _nSrc;
3737  connInfo.nDest = _nDest;
3738  connInfo.srcGLoffset = 0;
3739  connInfo.connId = _connId;
3740  connInfo.preSynId = -1;
3741  // adjust the sign of the weight based on inh/exc connection
3742  connInfo.initWt = isExcitatoryGroup(_grpSrc) ? fabs(initWt) : -1.0*fabs(initWt);
3743  connInfo.maxWt = isExcitatoryGroup(_grpSrc) ? fabs(maxWt) : -1.0*fabs(maxWt);
3744  connInfo.delay = delay;
3745 
3746  connectionLists[netId].push_back(connInfo);
3747 
3748  // If the connection is external, copy the connection info to the external network
3749  if (externalNetId >= 0)
3750  connectionLists[externalNetId].push_back(connInfo);
3751 }
3752 
3753 // make 'C' full connections from grpSrc to grpDest
3754 void SNN::connectFull(int netId, std::list<ConnectConfig>::iterator connIt, bool isExternal) {
3755  int grpSrc = connIt->grpSrc;
3756  int grpDest = connIt->grpDest;
3757  bool noDirect = (connIt->type == CONN_FULL_NO_DIRECT);
3758  int externalNetId = -1;
3759 
3760  if (isExternal) {
3761  externalNetId = groupConfigMDMap[grpDest].netId;
3762  assert(netId != externalNetId);
3763  }
3764 
3765  int gPreStart = groupConfigMDMap[grpSrc].gStartN;
3766  for(int gPreN = groupConfigMDMap[grpSrc].gStartN; gPreN <= groupConfigMDMap[grpSrc].gEndN; gPreN++) {
3767  Point3D locPre = getNeuronLocation3D(grpSrc, gPreN - gPreStart); // 3D coordinates of i
3768  int gPostStart = groupConfigMDMap[grpDest].gStartN;
3769  for(int gPostN = groupConfigMDMap[grpDest].gStartN; gPostN <= groupConfigMDMap[grpDest].gEndN; gPostN++) { // j: the temp neuron id
3770  // if flag is set, don't connect direct connections
3771  if(noDirect && gPreN == gPostN)
3772  continue;
3773 
3774  // check whether pre-neuron location is in RF of post-neuron
3775  Point3D locPost = getNeuronLocation3D(grpDest, gPostN - gPostStart); // 3D coordinates of j
3776  if (!isPoint3DinRF(connIt->connRadius, locPre, locPost))
3777  continue;
3778 
3779  connectNeurons(netId, grpSrc, grpDest, gPreN, gPostN, connIt->connId, externalNetId);
3780  connIt->numberOfConnections++;
3781  }
3782  }
3783 
3784  std::list<GroupConfigMD>::iterator grpIt;
3785  GroupConfigMD targetGrp;
3786 
3787  // update numPostSynapses and numPreSynapses of groups in the local network
3788  targetGrp.gGrpId = grpSrc; // the other fields does not matter
3789  grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
3790  assert(grpIt != groupPartitionLists[netId].end());
3791  grpIt->numPostSynapses += connIt->numberOfConnections;
3792 
3793  targetGrp.gGrpId = grpDest; // the other fields does not matter
3794  grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
3795  assert(grpIt != groupPartitionLists[netId].end());
3796  grpIt->numPreSynapses += connIt->numberOfConnections;
3797 
3798  // also update numPostSynapses and numPreSynapses of groups in the external network if the connection is external
3799  if (isExternal) {
3800  targetGrp.gGrpId = grpSrc; // the other fields does not matter
3801  grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
3802  assert(grpIt != groupPartitionLists[externalNetId].end());
3803  grpIt->numPostSynapses += connIt->numberOfConnections;
3804 
3805  targetGrp.gGrpId = grpDest; // the other fields does not matter
3806  grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
3807  assert(grpIt != groupPartitionLists[externalNetId].end());
3808  grpIt->numPreSynapses += connIt->numberOfConnections;
3809  }
3810 }
3811 
// Make Gaussian connections from grpSrc to grpDest: each candidate (pre, post)
// pair within the receptive field is connected with probability
// connProbability, and its initial weight is scaled by a Gaussian of the
// pre/post distance.
// NOTE(review): the drand48()/rand() draw order determines reproducibility of
// the generated network — do not reorder the RNG calls below.
void SNN::connectGaussian(int netId, std::list<ConnectConfig>::iterator connIt, bool isExternal) {
	// in case pre and post have different Grid3D sizes: scale pre to the grid size of post
	int grpSrc = connIt->grpSrc;
	int grpDest = connIt->grpDest;
	Grid3D grid_i = getGroupGrid3D(grpSrc);
	Grid3D grid_j = getGroupGrid3D(grpDest);
	Point3D scalePre = Point3D(grid_j.numX, grid_j.numY, grid_j.numZ) / Point3D(grid_i.numX, grid_i.numY, grid_i.numZ);
	int externalNetId = -1;

	// for external connections, the counterpart list lives on the destination partition
	if (isExternal) {
		externalNetId = groupConfigMDMap[grpDest].netId;
		assert(netId != externalNetId);
	}

	for(int i = groupConfigMDMap[grpSrc].gStartN; i <= groupConfigMDMap[grpSrc].gEndN; i++) {
		Point3D loc_i = getNeuronLocation3D(i)*scalePre; // i: adjusted 3D coordinates

		for(int j = groupConfigMDMap[grpDest].gStartN; j <= groupConfigMDMap[grpDest].gEndN; j++) { // j: the temp neuron id
			// check whether pre-neuron location is in RF of post-neuron
			Point3D loc_j = getNeuronLocation3D(j); // 3D coordinates of j

			// make sure point is in RF
			double rfDist = getRFDist3D(connIt->connRadius,loc_i,loc_j);
			if (rfDist < 0.0 || rfDist > 1.0)
				continue;

			// if rfDist is valid, it returns a number between 0 and 1
			// we want these numbers to fit to Gaussian weigths, so that rfDist=0 corresponds to max Gaussian weight
			// and rfDist=1 corresponds to 0.1 times max Gaussian weight
			// so we're looking at gauss = exp(-a*rfDist), where a such that exp(-a)=0.1
			// solving for a, we find that a = 2.3026
			double gauss = exp(-2.3026*rfDist);
			if (gauss < 0.1)
				continue;

			// Bernoulli trial: connect this pair with probability connProbability
			if (drand48() < connIt->connProbability) {
				float initWt = gauss * connIt->initWt; // scale weight according to gauss distance
				// delay is drawn uniformly from [minDelay, maxDelay]
				float maxWt = connIt->maxWt;
				uint8_t delay = connIt->minDelay + rand() % (connIt->maxDelay - connIt->minDelay + 1);
				assert((delay >= connIt->minDelay) && (delay <= connIt->maxDelay));

				connectNeurons(netId, grpSrc, grpDest, i, j, connIt->connId, initWt, maxWt, delay, externalNetId);
				connIt->numberOfConnections++;
			}
		}
	}

	std::list<GroupConfigMD>::iterator grpIt;
	GroupConfigMD targetGrp;

	// update numPostSynapses and numPreSynapses of groups in the local network
	// (std::find only compares gGrpId)
	targetGrp.gGrpId = grpSrc; // the other fields does not matter
	grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
	assert(grpIt != groupPartitionLists[netId].end());
	grpIt->numPostSynapses += connIt->numberOfConnections;

	targetGrp.gGrpId = grpDest; // the other fields does not matter
	grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
	assert(grpIt != groupPartitionLists[netId].end());
	grpIt->numPreSynapses += connIt->numberOfConnections;

	// also update numPostSynapses and numPreSynapses of groups in the external network if the connection is external
	if (isExternal) {
		targetGrp.gGrpId = grpSrc; // the other fields does not matter
		grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
		assert(grpIt != groupPartitionLists[externalNetId].end());
		grpIt->numPostSynapses += connIt->numberOfConnections;

		targetGrp.gGrpId = grpDest; // the other fields does not matter
		grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
		assert(grpIt != groupPartitionLists[externalNetId].end());
		grpIt->numPreSynapses += connIt->numberOfConnections;
	}
}
3886 
3887 void SNN::connectOneToOne(int netId, std::list<ConnectConfig>::iterator connIt, bool isExternal) {
3888  int grpSrc = connIt->grpSrc;
3889  int grpDest = connIt->grpDest;
3890  int externalNetId = -1;
3891 
3892  if (isExternal) {
3893  externalNetId = groupConfigMDMap[grpDest].netId;
3894  assert(netId != externalNetId);
3895  }
3896 
3897  assert( groupConfigMap[grpDest].numN == groupConfigMap[grpSrc].numN);
3898 
3899  // NOTE: RadiusRF does not make a difference here: ignore
3900  for(int gPreN = groupConfigMDMap[grpSrc].gStartN, gPostN = groupConfigMDMap[grpDest].gStartN; gPreN <= groupConfigMDMap[grpSrc].gEndN; gPreN++, gPostN++) {
3901  connectNeurons(netId, grpSrc, grpDest, gPreN, gPostN, connIt->connId, externalNetId);
3902  connIt->numberOfConnections++;
3903  }
3904 
3905  std::list<GroupConfigMD>::iterator grpIt;
3906  GroupConfigMD targetGrp;
3907 
3908  // update numPostSynapses and numPreSynapses of groups in the local network
3909  targetGrp.gGrpId = grpSrc; // the other fields does not matter
3910  grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
3911  assert(grpIt != groupPartitionLists[netId].end());
3912  grpIt->numPostSynapses += connIt->numberOfConnections;
3913 
3914  targetGrp.gGrpId = grpDest; // the other fields does not matter
3915  grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
3916  assert(grpIt != groupPartitionLists[netId].end());
3917  grpIt->numPreSynapses += connIt->numberOfConnections;
3918 
3919  // also update numPostSynapses and numPreSynapses of groups in the external network if the connection is external
3920  if (isExternal) {
3921  targetGrp.gGrpId = grpSrc; // the other fields does not matter
3922  grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
3923  assert(grpIt != groupPartitionLists[externalNetId].end());
3924  grpIt->numPostSynapses += connIt->numberOfConnections;
3925 
3926  targetGrp.gGrpId = grpDest; // the other fields does not matter
3927  grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
3928  assert(grpIt != groupPartitionLists[externalNetId].end());
3929  grpIt->numPreSynapses += connIt->numberOfConnections;
3930  }
3931 }
3932 
3933 // make 'C' random connections from grpSrc to grpDest
3934 void SNN::connectRandom(int netId, std::list<ConnectConfig>::iterator connIt, bool isExternal) {
3935  int grpSrc = connIt->grpSrc;
3936  int grpDest = connIt->grpDest;
3937  int externalNetId = -1;
3938 
3939  if (isExternal) {
3940  externalNetId = groupConfigMDMap[grpDest].netId;
3941  assert(netId != externalNetId);
3942  }
3943 
3944  int gPreStart = groupConfigMDMap[grpSrc].gStartN;
3945  for(int gPreN = groupConfigMDMap[grpSrc].gStartN; gPreN <= groupConfigMDMap[grpSrc].gEndN; gPreN++) {
3946  Point3D locPre = getNeuronLocation3D(grpSrc, gPreN - gPreStart); // 3D coordinates of i
3947  int gPostStart = groupConfigMDMap[grpDest].gStartN;
3948  for(int gPostN = groupConfigMDMap[grpDest].gStartN; gPostN <= groupConfigMDMap[grpDest].gEndN; gPostN++) {
3949  // check whether pre-neuron location is in RF of post-neuron
3950  Point3D locPost = getNeuronLocation3D(grpDest, gPostN - gPostStart); // 3D coordinates of j
3951  if (!isPoint3DinRF(connIt->connRadius, locPre, locPost))
3952  continue;
3953 
3954  if (drand48() < connIt->connProbability) {
3955  connectNeurons(netId, grpSrc, grpDest, gPreN, gPostN, connIt->connId, externalNetId);
3956  connIt->numberOfConnections++;
3957  }
3958  }
3959  }
3960 
3961  std::list<GroupConfigMD>::iterator grpIt;
3962  GroupConfigMD targetGrp;
3963 
3964  // update numPostSynapses and numPreSynapses of groups in the local network
3965  targetGrp.gGrpId = grpSrc; // the other fields does not matter
3966  grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
3967  assert(grpIt != groupPartitionLists[netId].end());
3968  grpIt->numPostSynapses += connIt->numberOfConnections;
3969 
3970  targetGrp.gGrpId = grpDest; // the other fields does not matter
3971  grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
3972  assert(grpIt != groupPartitionLists[netId].end());
3973  grpIt->numPreSynapses += connIt->numberOfConnections;
3974 
3975  // also update numPostSynapses and numPreSynapses of groups in the external network if the connection is external
3976  if (isExternal) {
3977  targetGrp.gGrpId = grpSrc; // the other fields does not matter
3978  grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
3979  assert(grpIt != groupPartitionLists[externalNetId].end());
3980  grpIt->numPostSynapses += connIt->numberOfConnections;
3981 
3982  targetGrp.gGrpId = grpDest; // the other fields does not matter
3983  grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
3984  assert(grpIt != groupPartitionLists[externalNetId].end());
3985  grpIt->numPreSynapses += connIt->numberOfConnections;
3986  }
3987 }
3988 
3989 // FIXME: rewrite user-define call-back function
3990 // user-defined functions called here...
3991 // This is where we define our user-defined call-back function. -- KDC
3992 void SNN::connectUserDefined(int netId, std::list<ConnectConfig>::iterator connIt, bool isExternal) {
3993  int grpSrc = connIt->grpSrc;
3994  int grpDest = connIt->grpDest;
3995  int externalNetId = -1;
3996 
3997  if (isExternal) {
3998  externalNetId = groupConfigMDMap[grpDest].netId;
3999  assert(netId != externalNetId);
4000  }
4001 
4002  connIt->maxDelay = 0;
4003  int preStartN = groupConfigMDMap[grpSrc].gStartN;
4004  int postStartN = groupConfigMDMap[grpDest].gStartN;
4005  for (int pre_nid = groupConfigMDMap[grpSrc].gStartN; pre_nid <= groupConfigMDMap[grpSrc].gEndN; pre_nid++) {
4006  //Point3D loc_pre = getNeuronLocation3D(pre_nid); // 3D coordinates of i
4007  for (int post_nid = groupConfigMDMap[grpDest].gStartN; post_nid <= groupConfigMDMap[grpDest].gEndN; post_nid++) {
4008  float weight, maxWt, delay;
4009  bool connected;
4010 
4011  connIt->conn->connect(this, grpSrc, pre_nid - preStartN, grpDest, post_nid - postStartN, weight, maxWt, delay, connected);
4012  if (connected) {
4013  assert(delay >= 1);
4014  assert(delay <= MAX_SYN_DELAY);
4015  assert(abs(weight) <= abs(maxWt));
4016 
4017  if (GET_FIXED_PLASTIC(connIt->connProp) == SYN_FIXED)
4018  maxWt = weight;
4019 
4020  if (fabs(maxWt) > connIt->maxWt)
4021  connIt->maxWt = fabs(maxWt);
4022 
4023  if (delay > connIt->maxDelay)
4024  connIt->maxDelay = delay;
4025 
4026  connectNeurons(netId, grpSrc, grpDest, pre_nid, post_nid, connIt->connId, weight, maxWt, delay, externalNetId);
4027  connIt->numberOfConnections++;
4028  }
4029  }
4030  }
4031 
4032  std::list<GroupConfigMD>::iterator grpIt;
4033  GroupConfigMD targetGrp;
4034 
4035  // update numPostSynapses and numPreSynapses of groups in the local network
4036  targetGrp.gGrpId = grpSrc; // the other fields does not matter
4037  grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
4038  assert(grpIt != groupPartitionLists[netId].end());
4039  grpIt->numPostSynapses += connIt->numberOfConnections;
4040 
4041  targetGrp.gGrpId = grpDest; // the other fields does not matter
4042  grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
4043  assert(grpIt != groupPartitionLists[netId].end());
4044  grpIt->numPreSynapses += connIt->numberOfConnections;
4045 
4046  // also update numPostSynapses and numPreSynapses of groups in the external network if the connection is external
4047  if (isExternal) {
4048  targetGrp.gGrpId = grpSrc; // the other fields does not matter
4049  grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
4050  assert(grpIt != groupPartitionLists[externalNetId].end());
4051  grpIt->numPostSynapses += connIt->numberOfConnections;
4052 
4053  targetGrp.gGrpId = grpDest; // the other fields does not matter
4054  grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
4055  assert(grpIt != groupPartitionLists[externalNetId].end());
4056  grpIt->numPreSynapses += connIt->numberOfConnections;
4057  }
4058 }
4059 
4061 //void SNN::connectFull(short int connId) {
4062 // int grpSrc = connectConfigMap[connId].grpSrc;
4063 // int grpDest = connectConfigMap[connId].grpDest;
4064 // bool noDirect = (connectConfigMap[connId].type == CONN_FULL_NO_DIRECT);
4065 //
4066 // // rebuild struct for easier handling
4067 // RadiusRF radius(connectConfigMap[connId].radX, connectConfigMap[connId].radY, connectConfigMap[connId].radZ);
4068 //
4069 // for(int i = groupConfigMap[grpSrc].StartN; i <= groupConfigMap[grpSrc].EndN; i++) {
4070 // Point3D loc_i = getNeuronLocation3D(i); // 3D coordinates of i
4071 // for(int j = groupConfigMap[grpDest].StartN; j <= groupConfigMap[grpDest].EndN; j++) { // j: the temp neuron id
4072 // // if flag is set, don't connect direct connections
4073 // if((noDirect) && (i - groupConfigMap[grpSrc].StartN) == (j - groupConfigMap[grpDest].StartN))
4074 // continue;
4075 //
4076 // // check whether pre-neuron location is in RF of post-neuron
4077 // Point3D loc_j = getNeuronLocation3D(j); // 3D coordinates of j
4078 // if (!isPoint3DinRF(radius, loc_i, loc_j))
4079 // continue;
4080 //
4081 // //uint8_t dVal = info->minDelay + (int)(0.5 + (drand48() * (info->maxDelay - info->minDelay)));
4082 // uint8_t dVal = connectConfigMap[connId].minDelay + rand() % (connectConfigMap[connId].maxDelay - connectConfigMap[connId].minDelay + 1);
4083 // assert((dVal >= connectConfigMap[connId].minDelay) && (dVal <= connectConfigMap[connId].maxDelay));
4084 // float synWt = generateWeight(connectConfigMap[connId].connProp, connectConfigMap[connId].initWt, connectConfigMap[connId].maxWt, i, grpSrc);
4085 //
4086 // setConnection(grpSrc, grpDest, i, j, synWt, connectConfigMap[connId].maxWt, dVal, connectConfigMap[connId].connProp, connId);// info->connId);
4087 // connectConfigMap[connId].numberOfConnections++;
4088 // }
4089 // }
4090 //
4091 // groupInfo[grpSrc].sumPostConn += connectConfigMap[connId].numberOfConnections;
4092 // groupInfo[grpDest].sumPreConn += connectConfigMap[connId].numberOfConnections;
4093 //}
4094 
4095 //void SNN::connectGaussian(short int connId) {
4096 // // rebuild struct for easier handling
4097 // // adjust with sqrt(2) in order to make the Gaussian kernel depend on 2*sigma^2
4098 // RadiusRF radius(connectConfigMap[connId].radX, connectConfigMap[connId].radY, connectConfigMap[connId].radZ);
4099 //
4100 // // in case pre and post have different Grid3D sizes: scale pre to the grid size of post
4101 // int grpSrc = connectConfigMap[connId].grpSrc;
4102 // int grpDest = connectConfigMap[connId].grpDest;
4103 // Grid3D grid_i = getGroupGrid3D(grpSrc);
4104 // Grid3D grid_j = getGroupGrid3D(grpDest);
4105 // Point3D scalePre = Point3D(grid_j.numX, grid_j.numY, grid_j.numZ) / Point3D(grid_i.numX, grid_i.numY, grid_i.numZ);
4106 //
4107 // for(int i = groupConfigMap[grpSrc].StartN; i <= groupConfigMap[grpSrc].EndN; i++) {
4108 // Point3D loc_i = getNeuronLocation3D(i)*scalePre; // i: adjusted 3D coordinates
4109 //
4110 // for(int j = groupConfigMap[grpDest].StartN; j <= groupConfigMap[grpDest].EndN; j++) { // j: the temp neuron id
4111 // // check whether pre-neuron location is in RF of post-neuron
4112 // Point3D loc_j = getNeuronLocation3D(j); // 3D coordinates of j
4113 //
4114 // // make sure point is in RF
4115 // double rfDist = getRFDist3D(radius,loc_i,loc_j);
4116 // if (rfDist < 0.0 || rfDist > 1.0)
4117 // continue;
4118 //
4119 // // if rfDist is valid, it returns a number between 0 and 1
4120 // // we want these numbers to fit to Gaussian weigths, so that rfDist=0 corresponds to max Gaussian weight
4121 // // and rfDist=1 corresponds to 0.1 times max Gaussian weight
4122 // // so we're looking at gauss = exp(-a*rfDist), where a such that exp(-a)=0.1
4123 // // solving for a, we find that a = 2.3026
4124 // double gauss = exp(-2.3026*rfDist);
4125 // if (gauss < 0.1)
4126 // continue;
4127 //
4128 // if (drand48() < connectConfigMap[connId].p) {
4129 // uint8_t dVal = connectConfigMap[connId].minDelay + rand() % (connectConfigMap[connId].maxDelay - connectConfigMap[connId].minDelay + 1);
4130 // assert((dVal >= connectConfigMap[connId].minDelay) && (dVal <= connectConfigMap[connId].maxDelay));
4131 // float synWt = gauss * connectConfigMap[connId].initWt; // scale weight according to gauss distance
4132 // setConnection(grpSrc, grpDest, i, j, synWt, connectConfigMap[connId].maxWt, dVal, connectConfigMap[connId].connProp, connId);//info->connId);
4133 // connectConfigMap[connId].numberOfConnections++;
4134 // }
4135 // }
4136 // }
4137 //
4138 // groupInfo[grpSrc].sumPostConn += connectConfigMap[connId].numberOfConnections;
4139 // groupInfo[grpDest].sumPreConn += connectConfigMap[connId].numberOfConnections;
4140 //}
4141 //
4142 //void SNN::connectOneToOne(short int connId) {
4143 // int grpSrc = connectConfigMap[connId].grpSrc;
4144 // int grpDest = connectConfigMap[connId].grpDest;
4145 // assert( groupConfigMap[grpDest].SizeN == groupConfigMap[grpSrc].SizeN );
4146 //
4147 // // NOTE: RadiusRF does not make a difference here: ignore
4148 // for(int nid=groupConfigMap[grpSrc].StartN,j=groupConfigMap[grpDest].StartN; nid<=groupConfigMap[grpSrc].EndN; nid++, j++) {
4149 // uint8_t dVal = connectConfigMap[connId].minDelay + rand() % (connectConfigMap[connId].maxDelay - connectConfigMap[connId].minDelay + 1);
4150 // assert((dVal >= connectConfigMap[connId].minDelay) && (dVal <= connectConfigMap[connId].maxDelay));
4151 // float synWt = generateWeight(connectConfigMap[connId].connProp, connectConfigMap[connId].initWt, connectConfigMap[connId].maxWt, nid, grpSrc);
4152 // setConnection(grpSrc, grpDest, nid, j, synWt, connectConfigMap[connId].maxWt, dVal, connectConfigMap[connId].connProp, connId);//info->connId);
4153 // connectConfigMap[connId].numberOfConnections++;
4154 // }
4155 //
4156 // groupInfo[grpSrc].sumPostConn += connectConfigMap[connId].numberOfConnections;
4157 // groupInfo[grpDest].sumPreConn += connectConfigMap[connId].numberOfConnections;
4158 //}
4159 //
4161 //void SNN::connectRandom(short int connId) {
4162 // int grpSrc = connectConfigMap[connId].grpSrc;
4163 // int grpDest = connectConfigMap[connId].grpDest;
4164 //
4165 // // rebuild struct for easier handling
4166 // RadiusRF radius(connectConfigMap[connId].radX, connectConfigMap[connId].radY, connectConfigMap[connId].radZ);
4167 //
4168 // for(int pre_nid = groupConfigMap[grpSrc].StartN; pre_nid <= groupConfigMap[grpSrc].EndN; pre_nid++) {
4169 // Point3D loc_pre = getNeuronLocation3D(pre_nid); // 3D coordinates of i
4170 // for(int post_nid = groupConfigMap[grpDest].StartN; post_nid <= groupConfigMap[grpDest].EndN; post_nid++) {
4171 // // check whether pre-neuron location is in RF of post-neuron
4172 // Point3D loc_post = getNeuronLocation3D(post_nid); // 3D coordinates of j
4173 // if (!isPoint3DinRF(radius, loc_pre, loc_post))
4174 // continue;
4175 //
4176 // if (drand48() < connectConfigMap[connId].p) {
4177 // //uint8_t dVal = info->minDelay + (int)(0.5+(drand48()*(info->maxDelay-info->minDelay)));
4178 // uint8_t dVal = connectConfigMap[connId].minDelay + rand() % (connectConfigMap[connId].maxDelay - connectConfigMap[connId].minDelay + 1);
4179 // assert((dVal >= connectConfigMap[connId].minDelay) && (dVal <= connectConfigMap[connId].maxDelay));
4180 // float synWt = generateWeight(connectConfigMap[connId].connProp, connectConfigMap[connId].initWt, connectConfigMap[connId].maxWt, pre_nid, grpSrc);
4181 // setConnection(grpSrc, grpDest, pre_nid, post_nid, synWt, connectConfigMap[connId].maxWt, dVal, connectConfigMap[connId].connProp, connId); //info->connId);
4182 // connectConfigMap[connId].numberOfConnections++;
4183 // }
4184 // }
4185 // }
4186 //
4187 // groupInfo[grpSrc].sumPostConn += connectConfigMap[connId].numberOfConnections;
4188 // groupInfo[grpDest].sumPreConn += connectConfigMap[connId].numberOfConnections;
4189 //}
4190 //
4193 //void SNN::connectUserDefined(short int connId) {
4194 // int grpSrc = connectConfigMap[connId].grpSrc;
4195 // int grpDest = connectConfigMap[connId].grpDest;
4196 // connectConfigMap[connId].maxDelay = 0;
4197 // for(int nid=groupConfigMap[grpSrc].StartN; nid<=groupConfigMap[grpSrc].EndN; nid++) {
4198 // for(int nid2=groupConfigMap[grpDest].StartN; nid2 <= groupConfigMap[grpDest].EndN; nid2++) {
4199 // int srcId = nid - groupConfigMap[grpSrc].StartN;
4200 // int destId = nid2 - groupConfigMap[grpDest].StartN;
4201 // float weight, maxWt, delay;
4202 // bool connected;
4203 //
4204 // connectConfigMap[connId].conn->connect(this, grpSrc, srcId, grpDest, destId, weight, maxWt, delay, connected);
4205 // if(connected) {
4206 // if (GET_FIXED_PLASTIC(connectConfigMap[connId].connProp) == SYN_FIXED)
4207 // maxWt = weight;
4208 //
4209 // connectConfigMap[connId].maxWt = maxWt;
4210 //
4211 // assert(delay >= 1);
4212 // assert(delay <= MAX_SYN_DELAY);
4213 // assert(abs(weight) <= abs(maxWt));
4214 //
4215 // // adjust the sign of the weight based on inh/exc connection
4216 // weight = isExcitatoryGroup(grpSrc) ? fabs(weight) : -1.0*fabs(weight);
4217 // maxWt = isExcitatoryGroup(grpSrc) ? fabs(maxWt) : -1.0*fabs(maxWt);
4218 //
4219 // setConnection(grpSrc, grpDest, nid, nid2, weight, maxWt, delay, connectConfigMap[connId].connProp, connId);// info->connId);
4220 // connectConfigMap[connId].numberOfConnections++;
4221 // if(delay > connectConfigMap[connId].maxDelay) {
4222 // connectConfigMap[connId].maxDelay = delay;
4223 // }
4224 // }
4225 // }
4226 // }
4227 //
4228 // groupInfo[grpSrc].sumPostConn += connectConfigMap[connId].numberOfConnections;
4229 // groupInfo[grpDest].sumPreConn += connectConfigMap[connId].numberOfConnections;
4230 //}
4231 
// Tears down per-network runtime data on every allocated back-end.
// GPU runtimes are released directly; CPU runtimes are released on worker
// threads (one per network, pinned round-robin across NUM_CPU_CORES) that are
// joined before the CUDA timer is destroyed.
void SNN::deleteRuntimeData() {
	// FIXME: assert simulation use GPU first
	// wait for kernels to complete
#ifndef __NO_CUDA__
	// NOTE(review): cudaThreadSynchronize() is deprecated in favor of
	// cudaDeviceSynchronize() -- candidate cleanup
	CUDA_CHECK_ERRORS(cudaThreadSynchronize());
#endif

	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	pthread_t threads[numCores + 1]; // 1 additional array size if numCores == 0, it may work though bad practice
	cpu_set_t cpus;
	ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
	int threadCount = 0;
	#endif

	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			if (netId < CPU_RUNTIME_BASE) // GPU runtime
				deleteRuntimeData_GPU(netId);
			else{ // CPU runtime
				#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
				deleteRuntimeData_CPU(netId);
				#else // Linux or MAC
				pthread_attr_t attr;
				pthread_attr_init(&attr);
				CPU_ZERO(&cpus);
				// pin the worker thread to a core, round-robin over available cores
				CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
				pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

				// only snn_pointer and netId matter for deletion; the
				// remaining ThreadStruct fields are unused by this routine
				argsThreadRoutine[threadCount].snn_pointer = this;
				argsThreadRoutine[threadCount].netId = netId;
				argsThreadRoutine[threadCount].lGrpId = 0;
				argsThreadRoutine[threadCount].startIdx = 0;
				argsThreadRoutine[threadCount].endIdx = 0;
				argsThreadRoutine[threadCount].GtoLOffset = 0;

				pthread_create(&threads[threadCount], &attr, &SNN::helperDeleteRuntimeData_CPU, (void*)&argsThreadRoutine[threadCount]);
				pthread_attr_destroy(&attr);
				threadCount++;
				#endif
			}
		}
	}

	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	// join all the threads
	for (int i=0; i<threadCount; i++){
		pthread_join(threads[i], NULL);
	}
	#endif

#ifndef __NO_CUDA__
	CUDA_DELETE_TIMER(timer);
#endif
}
4286 
// delete all objects (CPU and GPU side)
// Idempotent simulator teardown: prints the run summary, releases monitors and
// connection configs, frees manager and per-network runtime data, then closes
// the logger streams. Guarded by simulatorDeleted so a second call is a no-op.
void SNN::deleteObjects() {
	if (simulatorDeleted)
		return;

	printSimSummary();

	// deallocate objects
	resetMonitors(true);
	resetConnectionConfigs(true);

	// delete manager runtime data
	deleteManagerRuntimeData();

	deleteRuntimeData();

	// fclose file streams, unless in custom mode
	if (loggerMode_ != CUSTOM) {
		// don't fclose if it's stdout or stderr, otherwise they're gonna stay closed for the rest of the process
		if (fpInf_ != NULL && fpInf_ != stdout && fpInf_ != stderr)
			fclose(fpInf_);
		if (fpErr_ != NULL && fpErr_ != stdout && fpErr_ != stderr)
			fclose(fpErr_);
		if (fpDeb_ != NULL && fpDeb_ != stdout && fpDeb_ != stderr)
			fclose(fpDeb_);
		if (fpLog_ != NULL && fpLog_ != stdout && fpLog_ != stderr)
			fclose(fpLog_);
	}

	simulatorDeleted = true;
}
4318 
4319 void SNN::findMaxNumSynapsesGroups(int* _maxNumPostSynGrp, int* _maxNumPreSynGrp) {
4320  *_maxNumPostSynGrp = 0;
4321  *_maxNumPreSynGrp = 0;
4322 
4323  // scan all the groups and find the required information
4324  for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
4325  // find the values for maximum postsynaptic length
4326  // and maximum pre-synaptic length
4327  if (groupConfigMDMap[gGrpId].numPostSynapses > *_maxNumPostSynGrp)
4328  *_maxNumPostSynGrp = groupConfigMDMap[gGrpId].numPostSynapses;
4329  if (groupConfigMDMap[gGrpId].numPreSynapses > *_maxNumPreSynGrp)
4330  *_maxNumPreSynGrp = groupConfigMDMap[gGrpId].numPreSynapses;
4331  }
4332 }
4333 
4334 void SNN::findMaxNumSynapsesNeurons(int _netId, int& _maxNumPostSynN, int& _maxNumPreSynN) {
4335  int *tempNpre, *tempNpost;
4336  int nSrc, nDest, numNeurons;
4337  std::map<int, int> globalToLocalOffset;
4338 
4339  numNeurons = networkConfigs[_netId].numNAssigned;
4340  tempNpre = new int[numNeurons];
4341  tempNpost = new int[numNeurons];
4342  memset(tempNpre, 0, sizeof(int) * numNeurons);
4343  memset(tempNpost, 0, sizeof(int) * numNeurons);
4344 
4345  // load offset between global neuron id and local neuron id
4346  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[_netId].begin(); grpIt != groupPartitionLists[_netId].end(); grpIt++) {
4347  globalToLocalOffset[grpIt->gGrpId] = grpIt->GtoLOffset;
4348  }
4349 
4350  // calculate number of pre- and post- connections of each neuron
4351  for (std::list<ConnectionInfo>::iterator connIt = connectionLists[_netId].begin(); connIt != connectionLists[_netId].end(); connIt++) {
4352  nSrc = connIt->nSrc + globalToLocalOffset[connIt->grpSrc];
4353  nDest = connIt->nDest + globalToLocalOffset[connIt->grpDest];
4354  assert(nSrc < numNeurons); assert(nDest < numNeurons);
4355  tempNpost[nSrc]++;
4356  tempNpre[nDest]++;
4357  }
4358 
4359  // find out the maximum number of pre- and post- connections among neurons in a local network
4360  _maxNumPostSynN = 0;
4361  _maxNumPreSynN = 0;
4362  for (int nId = 0; nId < networkConfigs[_netId].numN; nId++) {
4363  if (tempNpost[nId] > _maxNumPostSynN) _maxNumPostSynN = tempNpost[nId];
4364  if (tempNpre[nId] > _maxNumPreSynN) _maxNumPreSynN = tempNpre[nId];
4365  }
4366 
4367  delete [] tempNpre;
4368  delete [] tempNpost;
4369 }
4370 
4371 void SNN::findMaxSpikesD1D2(int _netId, unsigned int& _maxSpikesD1, unsigned int& _maxSpikesD2) {
4372  _maxSpikesD1 = 0; _maxSpikesD2 = 0;
4373  for(std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[_netId].begin(); grpIt != groupPartitionLists[_netId].end(); grpIt++) {
4374  if (grpIt->maxOutgoingDelay == 1)
4375  _maxSpikesD1 += (groupConfigMap[grpIt->gGrpId].numN * NEURON_MAX_FIRING_RATE);
4376  else
4377  _maxSpikesD2 += (groupConfigMap[grpIt->gGrpId].numN * NEURON_MAX_FIRING_RATE);
4378  }
4379 }
4380 
4381 void SNN::findNumN(int _netId, int& _numN, int& _numNExternal, int& _numNAssigned,
4382  int& _numNReg, int& _numNExcReg, int& _numNInhReg,
4383  int& _numNPois, int& _numNExcPois, int& _numNInhPois) {
4384  _numN = 0; _numNExternal = 0; _numNAssigned = 0;
4385  _numNReg = 0; _numNExcReg = 0; _numNInhReg = 0;
4386  _numNPois = 0; _numNExcPois = 0; _numNInhPois = 0;
4387  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[_netId].begin(); grpIt != groupPartitionLists[_netId].end(); grpIt++) {
4388  int sizeN = groupConfigMap[grpIt->gGrpId].numN;
4389  unsigned int type = groupConfigMap[grpIt->gGrpId].type;
4390  if (IS_EXCITATORY_TYPE(type) && (type & POISSON_NEURON) && grpIt->netId == _netId) {
4391  _numN += sizeN; _numNPois += sizeN; _numNExcPois += sizeN;
4392  } else if (IS_INHIBITORY_TYPE(type) && (type & POISSON_NEURON) && grpIt->netId == _netId) {
4393  _numN += sizeN; _numNPois += sizeN; _numNInhPois += sizeN;
4394  } else if (IS_EXCITATORY_TYPE(type) && !(type & POISSON_NEURON) && grpIt->netId == _netId) {
4395  _numN += sizeN; _numNReg += sizeN; _numNExcReg += sizeN;
4396  } else if (IS_INHIBITORY_TYPE(type) && !(type & POISSON_NEURON) && grpIt->netId == _netId) {
4397  _numN += sizeN; _numNReg += sizeN; _numNInhReg += sizeN;
4398  } else if (grpIt->netId != _netId) {
4399  _numNExternal += sizeN;
4400  } else {
4401  KERNEL_ERROR("Can't find catagory for the group [%d] ", grpIt->gGrpId);
4402  exitSimulation(-1);
4403  }
4404  _numNAssigned += sizeN;
4405  }
4406 
4407  assert(_numNReg == _numNExcReg + _numNInhReg);
4408  assert(_numNPois == _numNExcPois + _numNInhPois);
4409  assert(_numN == _numNReg + _numNPois);
4410  assert(_numNAssigned == _numN + _numNExternal);
4411 }
4412 
4413 void SNN::findNumNSpikeGenAndOffset(int _netId) {
4414  networkConfigs[_netId].numNSpikeGen = 0;
4415 
4416  for(int lGrpId = 0; lGrpId < networkConfigs[_netId].numGroups; lGrpId++) {
4417  if (_netId == groupConfigs[_netId][lGrpId].netId && groupConfigs[_netId][lGrpId].isSpikeGenerator && groupConfigs[_netId][lGrpId].isSpikeGenFunc) {
4418  groupConfigs[_netId][lGrpId].Noffset = networkConfigs[_netId].numNSpikeGen;
4419  networkConfigs[_netId].numNSpikeGen += groupConfigs[_netId][lGrpId].numN;
4420  }
4421  }
4422 
4423  assert(networkConfigs[_netId].numNSpikeGen <= networkConfigs[_netId].numNPois);
4424 }
4425 
4426 void SNN::findNumSynapsesNetwork(int _netId, int& _numPostSynNet, int& _numPreSynNet) {
4427  _numPostSynNet = 0;
4428  _numPreSynNet = 0;
4429 
4430  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[_netId].begin(); grpIt != groupPartitionLists[_netId].end(); grpIt++) {
4431  _numPostSynNet += grpIt->numPostSynapses;
4432  _numPreSynNet += grpIt->numPreSynapses;
4433  assert(_numPostSynNet < INT_MAX);
4434  assert(_numPreSynNet < INT_MAX);
4435  }
4436 
4437  assert(_numPreSynNet == _numPostSynNet);
4438 }
4439 
4440 void SNN::fetchGroupState(int netId, int lGrpId) {
4441  if (netId < CPU_RUNTIME_BASE)
4442  copyGroupState(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false);
4443  else
4444  copyGroupState(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false);
4445 }
4446 
4447 void SNN::fetchWeightState(int netId, int lGrpId) {
4448  if (netId < CPU_RUNTIME_BASE)
4449  copyWeightState(netId, lGrpId, cudaMemcpyDeviceToHost);
4450  else
4451  copyWeightState(netId, lGrpId);
4452 }
4453 
4459 void SNN::fetchNeuronSpikeCount (int gGrpId) {
4460  if (gGrpId == ALL) {
4461  for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
4462  fetchNeuronSpikeCount(gGrpId);
4463  }
4464  } else {
4465  int netId = groupConfigMDMap[gGrpId].netId;
4466  int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
4467  int LtoGOffset = groupConfigMDMap[gGrpId].LtoGOffset;
4468 
4469  if (netId < CPU_RUNTIME_BASE)
4470  copyNeuronSpikeCount(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, LtoGOffset);
4471  else
4472  copyNeuronSpikeCount(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false, LtoGOffset);
4473  }
4474 }
4475 
// Stub: intentionally a no-op -- STP state is currently never copied back
// from the runtime back-ends.
void SNN::fetchSTPState(int gGrpId) {
}
4478 
4484 void SNN::fetchConductanceAMPA(int gGrpId) {
4485  if (gGrpId == ALL) {
4486  for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
4487  fetchConductanceAMPA(gGrpId);
4488  }
4489  } else {
4490  int netId = groupConfigMDMap[gGrpId].netId;
4491  int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
4492  int LtoGOffset = groupConfigMDMap[gGrpId].LtoGOffset;
4493 
4494  if (netId < CPU_RUNTIME_BASE)
4495  copyConductanceAMPA(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, LtoGOffset);
4496  else
4497  copyConductanceAMPA(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false, LtoGOffset);
4498  }
4499 }
4500 
4506 void SNN::fetchConductanceNMDA(int gGrpId) {
4507  if (gGrpId == ALL) {
4508  for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
4509  fetchConductanceNMDA(gGrpId);
4510  }
4511  } else {
4512  int netId = groupConfigMDMap[gGrpId].netId;
4513  int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
4514  int LtoGOffset = groupConfigMDMap[gGrpId].LtoGOffset;
4515 
4516  if (netId < CPU_RUNTIME_BASE)
4517  copyConductanceNMDA(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, LtoGOffset);
4518  else
4519  copyConductanceNMDA(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false, LtoGOffset);
4520  }
4521 }
4522 
4528 void SNN::fetchConductanceGABAa(int gGrpId) {
4529  if (gGrpId == ALL) {
4530  for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
4531  fetchConductanceGABAa(gGrpId);
4532  }
4533  } else {
4534  int netId = groupConfigMDMap[gGrpId].netId;
4535  int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
4536  int LtoGOffset = groupConfigMDMap[gGrpId].LtoGOffset;
4537 
4538  if (netId < CPU_RUNTIME_BASE)
4539  copyConductanceGABAa(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, LtoGOffset);
4540  else
4541  copyConductanceGABAa(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false, LtoGOffset);
4542  }
4543 }
4544 
4550 void SNN::fetchConductanceGABAb(int gGrpId) {
4551  if (gGrpId == ALL) {
4552  for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
4553  fetchConductanceGABAb(gGrpId);
4554  }
4555  } else {
4556  int netId = groupConfigMDMap[gGrpId].netId;
4557  int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
4558  int LtoGOffset = groupConfigMDMap[gGrpId].LtoGOffset;
4559 
4560  if (netId < CPU_RUNTIME_BASE)
4561  copyConductanceGABAb(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, LtoGOffset);
4562  else
4563  copyConductanceGABAb(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false, LtoGOffset);
4564  }
4565 }
4566 
4567 
4568 void SNN::fetchGrpIdsLookupArray(int netId) {
4569  if (netId < CPU_RUNTIME_BASE)
4570  copyGrpIdsLookupArray(netId, cudaMemcpyDeviceToHost);
4571  else
4572  copyGrpIdsLookupArray(netId);
4573 }
4574 
4575 void SNN::fetchConnIdsLookupArray(int netId) {
4576  if (netId < CPU_RUNTIME_BASE)
4577  copyConnIdsLookupArray(netId, cudaMemcpyDeviceToHost);
4578  else
4579  copyConnIdsLookupArray(netId);
4580 }
4581 
4582 void SNN::fetchLastSpikeTime(int netId) {
4583  if (netId < CPU_RUNTIME_BASE)
4584  copyLastSpikeTime(netId, cudaMemcpyDeviceToHost);
4585  else
4586  copyLastSpikeTime(netId);
4587 }
4588 
4589 void SNN::fetchPreConnectionInfo(int netId) {
4590  if (netId < CPU_RUNTIME_BASE)
4591  copyPreConnectionInfo(netId, ALL, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false);
4592  else
4593  copyPreConnectionInfo(netId, ALL, &managerRuntimeData, &runtimeData[netId], false);
4594 }
4595 
4596 void SNN::fetchPostConnectionInfo(int netId) {
4597  if (netId < CPU_RUNTIME_BASE)
4598  copyPostConnectionInfo(netId, ALL, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false);
4599  else
4600  copyPostConnectionInfo(netId, ALL, &managerRuntimeData, &runtimeData[netId], false);
4601 }
4602 
4603 void SNN::fetchSynapseState(int netId) {
4604  if (netId < CPU_RUNTIME_BASE)
4605  copySynapseState(netId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false);
4606  else
4607  copySynapseState(netId, &managerRuntimeData, &runtimeData[netId], false);
4608 }
4609 
4610 
4614 void SNN::fetchNetworkSpikeCount() {
4615  unsigned int spikeCountD1, spikeCountD2, spikeCountExtD1, spikeCountExtD2;
4616 
4617  managerRuntimeData.spikeCountD1 = 0;
4618  managerRuntimeData.spikeCountD2 = 0;
4619  managerRuntimeData.spikeCountExtRxD2 = 0;
4620  managerRuntimeData.spikeCountExtRxD1 = 0;
4621  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
4622  if (!groupPartitionLists[netId].empty()) {
4623 
4624  if (netId < CPU_RUNTIME_BASE) {
4625  copyNetworkSpikeCount(netId, cudaMemcpyDeviceToHost,
4626  &spikeCountD1, &spikeCountD2,
4627  &spikeCountExtD1, &spikeCountExtD2);
4628  //printf("netId:%d, D1:%d/D2:%d, extD1:%d/D2:%d\n", netId, spikeCountD1, spikeCountD2, spikeCountExtD1, spikeCountExtD2);
4629  } else {
4630  copyNetworkSpikeCount(netId,
4631  &spikeCountD1, &spikeCountD2,
4632  &spikeCountExtD1, &spikeCountExtD2);
4633  //printf("netId:%d, D1:%d/D2:%d, extD1:%d/D2:%d\n", netId, spikeCountD1, spikeCountD2, spikeCountExtD1, spikeCountExtD2);
4634  }
4635 
4636  managerRuntimeData.spikeCountD2 += spikeCountD2 - spikeCountExtD2;
4637  managerRuntimeData.spikeCountD1 += spikeCountD1 - spikeCountExtD1;
4638  managerRuntimeData.spikeCountExtRxD2 += spikeCountExtD2;
4639  managerRuntimeData.spikeCountExtRxD1 += spikeCountExtD1;
4640  }
4641  }
4642 
4643  managerRuntimeData.spikeCount = managerRuntimeData.spikeCountD1 + managerRuntimeData.spikeCountD2;
4644 }
4645 
4646 void SNN::fetchSpikeTables(int netId) {
4647  if (netId < CPU_RUNTIME_BASE)
4648  copySpikeTables(netId, cudaMemcpyDeviceToHost);
4649  else
4650  copySpikeTables(netId);
4651 }
4652 
4653 void SNN::fetchNeuronStateBuffer(int netId, int lGrpId) {
4654  if (netId < CPU_RUNTIME_BASE)
4655  copyNeuronStateBuffer(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false);
4656  else
4657  copyNeuronStateBuffer(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false);
4658 }
4659 
4660 void SNN::fetchExtFiringTable(int netId) {
4661  assert(netId < MAX_NET_PER_SNN);
4662 
4663  if (netId < CPU_RUNTIME_BASE) { // GPU runtime
4664  copyExtFiringTable(netId, cudaMemcpyDeviceToHost);
4665  } else { // CPU runtime
4666  copyExtFiringTable(netId);
4667  }
4668 }
4669 
4670 void SNN::fetchTimeTable(int netId) {
4671  assert(netId < MAX_NET_PER_SNN);
4672 
4673  if (netId < CPU_RUNTIME_BASE) { // GPU runtime
4674  copyTimeTable(netId, cudaMemcpyDeviceToHost);
4675  } else {
4676  copyTimeTable(netId, true);
4677  }
4678 }
4679 
4680 void SNN::writeBackTimeTable(int netId) {
4681  assert(netId < MAX_NET_PER_SNN);
4682 
4683  if (netId < CPU_RUNTIME_BASE) { // GPU runtime
4684  copyTimeTable(netId, cudaMemcpyHostToDevice);
4685  } else {
4686  copyTimeTable(netId, false);
4687  }
4688 }
4689 
// Copies `size` bytes of spike data between the runtimes of two local
// networks. The four branches cover every source/destination pairing:
//   GPU -> GPU : peer-to-peer device copy
//   CPU -> GPU : host-to-device copy
//   GPU -> CPU : device-to-host copy
//   CPU -> CPU : plain memcpy
// Without CUDA, only the CPU -> CPU case is legal.
// NOTE(review): netId is passed to cudaMemcpyPeer as the CUDA device id, i.e.
// GPU netIds are assumed to coincide with device ordinals -- confirm against
// checkAndSetGPUDevice.
void SNN::transferSpikes(void* dest, int destNetId, void* src, int srcNetId, int size) {
#ifndef __NO_CUDA__
	if (srcNetId < CPU_RUNTIME_BASE && destNetId < CPU_RUNTIME_BASE) {
		checkAndSetGPUDevice(destNetId);
		CUDA_CHECK_ERRORS(cudaMemcpyPeer(dest, destNetId, src, srcNetId, size));
	} else if (srcNetId >= CPU_RUNTIME_BASE && destNetId < CPU_RUNTIME_BASE) {
		checkAndSetGPUDevice(destNetId);
		CUDA_CHECK_ERRORS(cudaMemcpy(dest, src, size, cudaMemcpyHostToDevice));
	} else if (srcNetId < CPU_RUNTIME_BASE && destNetId >= CPU_RUNTIME_BASE) {
		checkAndSetGPUDevice(srcNetId);
		CUDA_CHECK_ERRORS(cudaMemcpy(dest, src, size, cudaMemcpyDeviceToHost));
	} else if(srcNetId >= CPU_RUNTIME_BASE && destNetId >= CPU_RUNTIME_BASE) {
		memcpy(dest, src, size);
	}
#else
	assert(srcNetId >= CPU_RUNTIME_BASE && destNetId >= CPU_RUNTIME_BASE);
	memcpy(dest, src, size);
#endif
}
4709 
4710 void SNN::convertExtSpikesD2(int netId, int startIdx, int endIdx, int GtoLOffset) {
4711  if (netId < CPU_RUNTIME_BASE)
4712  convertExtSpikesD2_GPU(netId, startIdx, endIdx, GtoLOffset);
4713  else
4714  convertExtSpikesD2_CPU(netId, startIdx, endIdx, GtoLOffset);
4715 }
4716 
4717 void SNN::convertExtSpikesD1(int netId, int startIdx, int endIdx, int GtoLOffset) {
4718  if (netId < CPU_RUNTIME_BASE)
4719  convertExtSpikesD1_GPU(netId, startIdx, endIdx, GtoLOffset);
4720  else
4721  convertExtSpikesD1_CPU(netId, startIdx, endIdx, GtoLOffset);
4722 }
4723 
// Routes externally connected spikes between local networks. For every entry
// (srcNetId -> destNetId) in the spike routing table: the source's external
// firing tables (D1 and D2) are appended to the destination's firing tables,
// converted to the destination's local neuron ids (on a GPU directly, on a CPU
// via pinned worker threads), and the destination's time tables are updated to
// account for the appended entries.
void SNN::routeSpikes() {
	int firingTableIdxD2, firingTableIdxD1;
	int GtoLOffset;

	for (std::list<RoutingTableEntry>::iterator rteItr = spikeRoutingTable.begin(); rteItr != spikeRoutingTable.end(); rteItr++) {
		int srcNetId = rteItr->srcNetId;
		int destNetId = rteItr->destNetId;

		fetchExtFiringTable(srcNetId);

		fetchTimeTable(destNetId);
		// append position: the destination's current end-of-table for this ms slot
		firingTableIdxD2 = managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1];
		firingTableIdxD1 = managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1];
		//KERNEL_DEBUG("GPU1 D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2);
		//printf("srcNetId %d,destNetId %d, D1:%d/D2:%d\n", srcNetId, destNetId, firingTableIdxD1, firingTableIdxD2);

		#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
		// up to two threads (D1 + D2) per source group may be spawned
		pthread_t threads[(2 * networkConfigs[srcNetId].numGroups) + 1]; // 1 additional array size if numCores == 0, it may work though bad practice
		cpu_set_t cpus;
		ThreadStruct argsThreadRoutine[(2 * networkConfigs[srcNetId].numGroups) + 1]; // same as above, +1 array size
		int threadCount = 0;
		#endif

		for (int lGrpId = 0; lGrpId < networkConfigs[srcNetId].numGroups; lGrpId++) {
			// --- delay-2+ spikes of this source group ---
			if (groupConfigs[srcNetId][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD2[lGrpId] > 0) {
				// search GtoLOffset of the neural group at destination local network
				bool isFound = false;
				for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[destNetId].begin(); grpIt != groupPartitionLists[destNetId].end(); grpIt++) {
					if (grpIt->gGrpId == groupConfigs[srcNetId][lGrpId].gGrpId) {
						GtoLOffset = grpIt->GtoLOffset;
						isFound = true;
						break;
					}
				}

				if (isFound) {
					transferSpikes(runtimeData[destNetId].firingTableD2 + firingTableIdxD2, destNetId,
						managerRuntimeData.extFiringTableD2[lGrpId], srcNetId,
						sizeof(int) * managerRuntimeData.extFiringTableEndIdxD2[lGrpId]);

					if (destNetId < CPU_RUNTIME_BASE){
						convertExtSpikesD2_GPU(destNetId, firingTableIdxD2,
							firingTableIdxD2 + managerRuntimeData.extFiringTableEndIdxD2[lGrpId],
							GtoLOffset); // [StartIdx, EndIdx)
					}
					else{// CPU runtime
						#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
						convertExtSpikesD2_CPU(destNetId, firingTableIdxD2,
							firingTableIdxD2 + managerRuntimeData.extFiringTableEndIdxD2[lGrpId],
							GtoLOffset); // [StartIdx, EndIdx)
						#else // Linux or MAC
						pthread_attr_t attr;
						pthread_attr_init(&attr);
						CPU_ZERO(&cpus);
						CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
						pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

						argsThreadRoutine[threadCount].snn_pointer = this;
						argsThreadRoutine[threadCount].netId = destNetId;
						argsThreadRoutine[threadCount].lGrpId = 0;
						argsThreadRoutine[threadCount].startIdx = firingTableIdxD2;
						argsThreadRoutine[threadCount].endIdx = firingTableIdxD2 + managerRuntimeData.extFiringTableEndIdxD2[lGrpId];
						argsThreadRoutine[threadCount].GtoLOffset = GtoLOffset;

						pthread_create(&threads[threadCount], &attr, &SNN::helperConvertExtSpikesD2_CPU, (void*)&argsThreadRoutine[threadCount]);
						pthread_attr_destroy(&attr);
						threadCount++;
						#endif
					}

					firingTableIdxD2 += managerRuntimeData.extFiringTableEndIdxD2[lGrpId];
				}
			}

			// --- delay-1 spikes of this source group (mirror of the D2 path) ---
			if (groupConfigs[srcNetId][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD1[lGrpId] > 0) {
				// search GtoLOffset of the neural group at destination local network
				bool isFound = false;
				for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[destNetId].begin(); grpIt != groupPartitionLists[destNetId].end(); grpIt++) {
					if (grpIt->gGrpId == groupConfigs[srcNetId][lGrpId].gGrpId) {
						GtoLOffset = grpIt->GtoLOffset;
						isFound = true;
						break;
					}
				}

				if (isFound) {
					transferSpikes(runtimeData[destNetId].firingTableD1 + firingTableIdxD1, destNetId,
						managerRuntimeData.extFiringTableD1[lGrpId], srcNetId,
						sizeof(int) * managerRuntimeData.extFiringTableEndIdxD1[lGrpId]);
					if (destNetId < CPU_RUNTIME_BASE){
						convertExtSpikesD1_GPU(destNetId, firingTableIdxD1,
							firingTableIdxD1 + managerRuntimeData.extFiringTableEndIdxD1[lGrpId],
							GtoLOffset); // [StartIdx, EndIdx)
					}
					else{// CPU runtime
						#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
						convertExtSpikesD1_CPU(destNetId, firingTableIdxD1,
							firingTableIdxD1 + managerRuntimeData.extFiringTableEndIdxD1[lGrpId],
							GtoLOffset); // [StartIdx, EndIdx)
						#else // Linux or MAC
						pthread_attr_t attr;
						pthread_attr_init(&attr);
						CPU_ZERO(&cpus);
						CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
						pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

						argsThreadRoutine[threadCount].snn_pointer = this;
						argsThreadRoutine[threadCount].netId = destNetId;
						argsThreadRoutine[threadCount].lGrpId = 0;
						argsThreadRoutine[threadCount].startIdx = firingTableIdxD1;
						argsThreadRoutine[threadCount].endIdx = firingTableIdxD1 + managerRuntimeData.extFiringTableEndIdxD1[lGrpId];
						argsThreadRoutine[threadCount].GtoLOffset = GtoLOffset;

						pthread_create(&threads[threadCount], &attr, &SNN::helperConvertExtSpikesD1_CPU, (void*)&argsThreadRoutine[threadCount]);
						pthread_attr_destroy(&attr);
						threadCount++;
						#endif
					}
					firingTableIdxD1 += managerRuntimeData.extFiringTableEndIdxD1[lGrpId];
				}
			}
			//KERNEL_DEBUG("GPU1 New D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2);
		}

		#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
		// join all the threads
		for (int i=0; i<threadCount; i++){
			pthread_join(threads[i], NULL);
		}
		#endif

		// publish the new end-of-table indices back to the destination runtime
		managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD2;
		managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD1;
		writeBackTimeTable(destNetId);
	}
}
4860 
4861 //We need pass the neuron id (nid) and the grpId just for the case when we want to
4862 //ramp up/down the weights. In that case we need to set the weights of each synapse
4863 //depending on their nid (their position with respect to one another). -- KDC
4864 float SNN::generateWeight(int connProp, float initWt, float maxWt, int nid, int grpId) {
4865  float actWts;
4867  //bool setRandomWeights = GET_INITWTS_RANDOM(connProp);
4868  //bool setRampDownWeights = GET_INITWTS_RAMPDOWN(connProp);
4869  //bool setRampUpWeights = GET_INITWTS_RAMPUP(connProp);
4870 
4871  //if (setRandomWeights)
4872  // actWts = initWt * drand48();
4873  //else if (setRampUpWeights)
4874  // actWts = (initWt + ((nid - groupConfigs[0][grpId].StartN) * (maxWt - initWt) / groupConfigs[0][grpId].SizeN));
4875  //else if (setRampDownWeights)
4876  // actWts = (maxWt - ((nid - groupConfigs[0][grpId].StartN) * (maxWt - initWt) / groupConfigs[0][grpId].SizeN));
4877  //else
4878  actWts = initWt;
4879 
4880  return actWts;
4881 }
4882 
// checks whether a connection ID contains plastic synapses O(#connections)
bool SNN::isConnectionPlastic(short int connId) {
	assert(connId != ALL);
	assert(connId < numConnections);

	// GET_FIXED_PLASTIC extracts the fixed/plastic field of connProp;
	// presumably a non-zero value denotes SYN_PLASTIC -- verify against the
	// macro definition in the headers.
	return GET_FIXED_PLASTIC(connectConfigMap[connId].connProp);
}
4890 
4891 // FIXME: distinguish the function call at CONFIG_STATE and SETUP_STATE, where groupConfigs[0][] might not be available
4892 // or groupConfigMap is not sync with groupConfigs[0][]
4893 // returns whether group has homeostasis enabled
4895  assert(grpId>=0 && grpId<getNumGroups());
4896  return (groupConfigMap[grpId].homeoConfig.WithHomeostasis);
4897 }
4898 
// performs various verification checkups before building the network
// Runs compartment, STDP, and homeostasis validation, then enforces the
// max-delay and STP/delay constraints; any violation aborts the simulation.
void SNN::verifyNetwork() {
	// make sure number of neuron parameters have been accumulated correctly
	// NOTE: this used to be updateParameters
	//verifyNumNeurons();

	// make sure compartment config is valid
	verifyCompartments();

	// make sure STDP post-group has some incoming plastic connections
	verifySTDP();

	// make sure every group with homeostasis also has STDP
	verifyHomeostasis();

	// make sure the max delay is within bound (debug builds only; the runtime
	// check at the bottom covers release builds)
	assert(glbNetworkConfig.maxDelay <= MAX_SYN_DELAY);

	// make sure there is sufficient buffer
	//if ((networkConfigs[0].maxSpikesD1 + networkConfigs[0].maxSpikesD2) < (numNExcReg + numNInhReg + numNPois) * UNKNOWN_NEURON_MAX_FIRING_RATE) {
	//	KERNEL_ERROR("Insufficient amount of buffer allocated...");
	//	exitSimulation(1);
	//}

	//make sure the number of pre- and post-connection does not exceed the limitation
	//if (maxNumPostSynGrp > MAX_NUM_POST_SYN) {
	//	for (int g = 0; g < numGroups; g++) {
	//		if (groupConfigMap[g].numPostSynapses>MAX_NUM_POST_SYN)
	//			KERNEL_ERROR("Grp: %s(%d) has too many output synapses (%d), max %d.",groupInfo[g].Name.c_str(),g,
	//						groupConfigMap[g].numPostSynapses,MAX_NUM_POST_SYN);
	//	}
	//	assert(maxNumPostSynGrp <= MAX_NUM_POST_SYN);
	//}

	//if (maxNumPreSynGrp > MAX_NUM_PRE_SYN) {
	//	for (int g = 0; g < numGroups; g++) {
	//		if (groupConfigMap[g].numPreSynapses>MAX_NUM_PRE_SYN)
	//			KERNEL_ERROR("Grp: %s(%d) has too many input synapses (%d), max %d.",groupInfo[g].Name.c_str(),g,
	//						groupConfigMap[g].numPreSynapses,MAX_NUM_PRE_SYN);
	//	}
	//	assert(maxNumPreSynGrp <= MAX_NUM_PRE_SYN);
	//}

	// make sure maxDelay == 1 if STP is enabled
	// \FIXME: need to figure out STP buffer for delays > 1
	if (sim_with_stp && glbNetworkConfig.maxDelay > 1) {
		KERNEL_ERROR("STP with delays > 1 ms is currently not supported.");
		exitSimulation(1);
	}

	// runtime guard complementing the assert above (active in release builds)
	if (glbNetworkConfig.maxDelay > MAX_SYN_DELAY) {
		KERNEL_ERROR("You are using a synaptic delay (%d) greater than MAX_SYN_DELAY defined in config.h", glbNetworkConfig.maxDelay);
		exitSimulation(1);
	}
}
4954 
4955 void SNN::verifyCompartments() {
4956  for (std::map<int, compConnectConfig>::iterator it = compConnectConfigMap.begin(); it != compConnectConfigMap.end(); it++)
4957  {
4958  int grpLower = it->second.grpSrc;
4959  int grpUpper = it->second.grpDest;
4960 
4961  // make sure groups are compartmentally enabled
4962  if (!groupConfigMap[grpLower].withCompartments) {
4963  KERNEL_ERROR("Group %s(%d) is not compartmentally enabled, cannot be part of a compartmental connection.",
4964  groupConfigMap[grpLower].grpName.c_str(), grpLower);
4965  exitSimulation(1);
4966  }
4967  if (!groupConfigMap[grpUpper].withCompartments) {
4968  KERNEL_ERROR("Group %s(%d) is not compartmentally enabled, cannot be part of a compartmental connection.",
4969  groupConfigMap[grpUpper].grpName.c_str(), grpUpper);
4970  exitSimulation(1);
4971  }
4972  }
4973 }
4974 
4975 // checks whether STDP is set on a post-group with incoming plastic connections
4976 void SNN::verifySTDP() {
4977  for (int gGrpId=0; gGrpId<getNumGroups(); gGrpId++) {
4978  if (groupConfigMap[gGrpId].stdpConfig.WithSTDP) {
4979  // for each post-group, check if any of the incoming connections are plastic
4980  bool isAnyPlastic = false;
4981  for (std::map<int, ConnectConfig>::iterator it = connectConfigMap.begin(); it != connectConfigMap.end(); it++) {
4982  if (it->second.grpDest == gGrpId) {
4983  // get syn wt type from connection property
4984  isAnyPlastic |= GET_FIXED_PLASTIC(it->second.connProp);
4985  if (isAnyPlastic) {
4986  // at least one plastic connection found: break while
4987  break;
4988  }
4989  }
4990  }
4991  if (!isAnyPlastic) {
4992  KERNEL_ERROR("If STDP on group %d (%s) is set, group must have some incoming plastic connections.",
4993  gGrpId, groupConfigMap[gGrpId].grpName.c_str());
4994  exitSimulation(1);
4995  }
4996  }
4997  }
4998 }
4999 
5000 // checks whether every group with Homeostasis also has STDP
5001 void SNN::verifyHomeostasis() {
5002  for (int gGrpId=0; gGrpId<getNumGroups(); gGrpId++) {
5003  if (groupConfigMap[gGrpId].homeoConfig.WithHomeostasis) {
5004  if (!groupConfigMap[gGrpId].stdpConfig.WithSTDP) {
5005  KERNEL_ERROR("If homeostasis is enabled on group %d (%s), then STDP must be enabled, too.",
5006  gGrpId, groupConfigMap[gGrpId].grpName.c_str());
5007  exitSimulation(1);
5008  }
5009  }
5010  }
5011 }
5012 
5014 //void SNN::verifyNumNeurons() {
5015 // int nExcPois = 0;
5016 // int nInhPois = 0;
5017 // int nExcReg = 0;
5018 // int nInhReg = 0;
5019 //
5020 // // scan all the groups and find the required information
5021 // // about the group (numN, numPostSynapses, numPreSynapses and others).
5022 // for(int g=0; g<numGroups; g++) {
5023 // if (groupConfigMap[g].Type==UNKNOWN_NEURON) {
5024 // KERNEL_ERROR("Unknown group for %d (%s)", g, groupInfo[g].Name.c_str());
5025 // exitSimulation(1);
5026 // }
5027 //
5028 // if (IS_INHIBITORY_TYPE(groupConfigMap[g].Type) && !(groupConfigMap[g].Type & POISSON_NEURON))
5029 // nInhReg += groupConfigMap[g].SizeN;
5030 // else if (IS_EXCITATORY_TYPE(groupConfigMap[g].Type) && !(groupConfigMap[g].Type & POISSON_NEURON))
5031 // nExcReg += groupConfigMap[g].SizeN;
5032 // else if (IS_EXCITATORY_TYPE(groupConfigMap[g].Type) && (groupConfigMap[g].Type & POISSON_NEURON))
5033 // nExcPois += groupConfigMap[g].SizeN;
5034 // else if (IS_INHIBITORY_TYPE(groupConfigMap[g].Type) && (groupConfigMap[g].Type & POISSON_NEURON))
5035 // nInhPois += groupConfigMap[g].SizeN;
5036 // }
5037 //
5038 // // check the newly gathered information with class members
5039 // if (numN != nExcReg+nInhReg+nExcPois+nInhPois) {
5040 // KERNEL_ERROR("nExcReg+nInhReg+nExcPois+nInhPois=%d does not add up to numN=%d",
5041 // nExcReg+nInhReg+nExcPois+nInhPois, numN);
5042 // exitSimulation(1);
5043 // }
5044 // if (numNReg != nExcReg+nInhReg) {
5045 // KERNEL_ERROR("nExcReg+nInhReg=%d does not add up to numNReg=%d", nExcReg+nInhReg, numNReg);
5046 // exitSimulation(1);
5047 // }
5048 // if (numNPois != nExcPois+nInhPois) {
5049 // KERNEL_ERROR("nExcPois+nInhPois=%d does not add up to numNPois=%d", nExcPois+nInhPois, numNPois);
5050 // exitSimulation(1);
5051 // }
5052 //
5053 // //printf("numN=%d == %d\n",numN,nExcReg+nInhReg+nExcPois+nInhPois);
5054 // //printf("numNReg=%d == %d\n",numNReg, nExcReg+nInhReg);
5055 // //printf("numNPois=%d == %d\n",numNPois, nExcPois+nInhPois);
5056 //
5057 // assert(numN <= 1000000);
5058 // assert((numN > 0) && (numN == numNExcReg + numNInhReg + numNPois));
5059 //}
5060 
5061 // \FIXME: not sure where this should go... maybe create some helper file?
5062 bool SNN::isPoint3DinRF(const RadiusRF& radius, const Point3D& pre, const Point3D& post) {
5063  // Note: RadiusRF rad is assumed to be the fanning in to the post neuron. So if the radius is 10 pixels, it means
5064  // that if you look at the post neuron, it will receive input from neurons that code for locations no more than
5065  // 10 pixels away. (The opposite is called a response/stimulus field.)
5066 
5067  double rfDist = getRFDist3D(radius, pre, post);
5068  return (rfDist >= 0.0 && rfDist <= 1.0);
5069 }
5070 
5071 double SNN::getRFDist3D(const RadiusRF& radius, const Point3D& pre, const Point3D& post) {
5072  // Note: RadiusRF rad is assumed to be the fanning in to the post neuron. So if the radius is 10 pixels, it means
5073  // that if you look at the post neuron, it will receive input from neurons that code for locations no more than
5074  // 10 pixels away.
5075 
5076  // ready output argument
5077  // SNN::isPoint3DinRF() will return true (connected) if rfDist e[0.0, 1.0]
5078  double rfDist = -1.0;
5079 
5080  // pre and post are connected in a generic 3D ellipsoid RF if x^2/a^2 + y^2/b^2 + z^2/c^2 <= 1.0, where
5081  // x = pre.x-post.x, y = pre.y-post.y, z = pre.z-post.z
5082  // x < 0 means: connect if y and z satisfy some constraints, but ignore x
5083  // x == 0 means: connect if y and z satisfy some constraints, and enforce pre.x == post.x
5084  if (radius.radX==0 && pre.x!=post.x || radius.radY==0 && pre.y!=post.y || radius.radZ==0 && pre.z!=post.z) {
5085  rfDist = -1.0;
5086  } else {
5087  // 3D ellipsoid: x^2/a^2 + y^2/b^2 + z^2/c^2 <= 1.0
5088  double xTerm = (radius.radX<=0) ? 0.0 : pow(pre.x-post.x,2)/pow(radius.radX,2);
5089  double yTerm = (radius.radY<=0) ? 0.0 : pow(pre.y-post.y,2)/pow(radius.radY,2);
5090  double zTerm = (radius.radZ<=0) ? 0.0 : pow(pre.z-post.z,2)/pow(radius.radZ,2);
5091  rfDist = xTerm + yTerm + zTerm;
5092  }
5093 
5094  return rfDist;
5095 }
5096 
// Partitions the global network into local networks (one per GPU/CPU runtime):
// assigns each group a netId, collects local/external connection lists, builds
// the spike routing table, assigns local neuron/group ids, and finally
// generates (or loads) the actual connections.
void SNN::partitionSNN() {
	// number of neurons assigned to each local network so far
	int numAssignedNeurons[MAX_NET_PER_SNN] = {0};

	// get number of available GPU card(s) in the present machine
	numAvailableGPUs = configGPUDevice();

	// pass 1: assign every group to a computing backend (netId)
	for (std::map<int, GroupConfigMD>::iterator grpIt = groupConfigMDMap.begin(); grpIt != groupConfigMDMap.end(); grpIt++) {
		// assign a group to the GPU specified by users
		int gGrpId = grpIt->second.gGrpId;
		int netId = groupConfigMap[gGrpId].preferredNetId;
		if (netId != ANY) {
			// the user explicitly requested a backend for this group
			assert(netId > ANY && netId < MAX_NET_PER_SNN);
			grpIt->second.netId = netId;
			numAssignedNeurons[netId] += groupConfigMap[gGrpId].numN;
			groupPartitionLists[netId].push_back(grpIt->second); // Copy by value, create a copy
		} else { // netId == ANY
			// TODO: add callback function that allows users to partition the network themselves
			// FIXME: make sure GPU(s) is available first
			// this pass separates groups into each local network and assigns each group a netId
			if (preferredSimMode_ == CPU_MODE) {
				grpIt->second.netId = CPU_RUNTIME_BASE; // CPU 0
				numAssignedNeurons[CPU_RUNTIME_BASE] += groupConfigMap[gGrpId].numN;
				groupPartitionLists[CPU_RUNTIME_BASE].push_back(grpIt->second); // Copy by value, create a copy
			} else if (preferredSimMode_ == GPU_MODE) {
				grpIt->second.netId = GPU_RUNTIME_BASE; // GPU 0
				numAssignedNeurons[GPU_RUNTIME_BASE] += groupConfigMap[gGrpId].numN;
				groupPartitionLists[GPU_RUNTIME_BASE].push_back(grpIt->second); // Copy by value, create a copy
			} else if (preferredSimMode_ == HYBRID_MODE) {
				// TODO: implement partition algorithm, use naive partition for now (allocate to CPU 0)
				grpIt->second.netId = CPU_RUNTIME_BASE; // CPU 0
				numAssignedNeurons[CPU_RUNTIME_BASE] += groupConfigMap[gGrpId].numN;
				groupPartitionLists[CPU_RUNTIME_BASE].push_back(grpIt->second); // Copy by value, create a copy
			} else {
				KERNEL_ERROR("Unkown simulation mode");
				exitSimulation(-1);
			}
		}

		if (grpIt->second.netId == -1) { // the group was not assigned to any computing backend
			KERNEL_ERROR("Can't assign the group [%d] to any partition", grpIt->second.gGrpId);
			exitSimulation(-1);
		}
	}

	// pass 2: find local connections (i.e., connection configs whose src and dest groups live on the same netId)
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
				if (groupConfigMDMap[connIt->second.grpSrc].netId == netId && groupConfigMDMap[connIt->second.grpDest].netId == netId) {
					localConnectLists[netId].push_back(connectConfigMap[connIt->second.connId]); // Copy by value
				}
			}

			// same for compartmental connections
			//printf("The size of compConnectConfigMap is: %i\n", compConnectConfigMap.size());
			for (std::map<int, compConnectConfig>::iterator connIt = compConnectConfigMap.begin(); connIt != compConnectConfigMap.end(); connIt++) {
				if (groupConfigMDMap[connIt->second.grpSrc].netId == netId && groupConfigMDMap[connIt->second.grpDest].netId == netId) {
					localCompConnectLists[netId].push_back(compConnectConfigMap[connIt->second.connId]); // Copy by value
				}
			}
		}
	}

	// pass 3: find external groups and external connections (src and dest on different netIds)
	spikeRoutingTable.clear();
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
				int srcNetId = groupConfigMDMap[connIt->second.grpSrc].netId;
				int destNetId = groupConfigMDMap[connIt->second.grpDest].netId;
				if (srcNetId == netId && destNetId != netId) {
					// search the source group in groupPartitionLists and mark it as having external connections
					GroupConfigMD targetGroup;
					std::list<GroupConfigMD>::iterator srcGrpIt, destGrpIt;

					targetGroup.gGrpId = connIt->second.grpSrc;
					srcGrpIt = find(groupPartitionLists[srcNetId].begin(), groupPartitionLists[srcNetId].end(), targetGroup);
					assert(srcGrpIt != groupPartitionLists[srcNetId].end());
					srcGrpIt->hasExternalConnect = true;

					// mirror the external dest group into the src partition (and vice versa below),
					// so each local network knows about the groups it exchanges spikes with
					// FIXME: fail to write external group if the only one external link across GPUs is uni directional (GPU0 -> GPU1, no GPU1 -> GPU0)
					targetGroup.gGrpId = connIt->second.grpDest;
					destGrpIt = find(groupPartitionLists[srcNetId].begin(), groupPartitionLists[srcNetId].end(), targetGroup);
					if (destGrpIt == groupPartitionLists[srcNetId].end()) { // the "external" dest group has not yet been copied to the "local" group partition list
						numAssignedNeurons[srcNetId] += groupConfigMap[connIt->second.grpDest].numN;
						groupPartitionLists[srcNetId].push_back(groupConfigMDMap[connIt->second.grpDest]);
					}

					targetGroup.gGrpId = connIt->second.grpSrc;
					srcGrpIt = find(groupPartitionLists[destNetId].begin(), groupPartitionLists[destNetId].end(), targetGroup);
					if (srcGrpIt == groupPartitionLists[destNetId].end()) {
						numAssignedNeurons[destNetId] += groupConfigMap[connIt->second.grpSrc].numN;
						groupPartitionLists[destNetId].push_back(groupConfigMDMap[connIt->second.grpSrc]);
					}

					externalConnectLists[srcNetId].push_back(connectConfigMap[connIt->second.connId]); // Copy by value

					// build the spike routing table along the way
					//printf("%d,%d -> %d,%d\n", srcNetId, connIt->second.grpSrc, destNetId, connIt->second.grpDest);
					RoutingTableEntry rte(srcNetId, destNetId);
					spikeRoutingTable.push_back(rte);
				}
			}
		}
	}

	// NOTE(review): std::list::unique only removes *adjacent* duplicates — confirm
	// that entries for the same (srcNetId, destNetId) pair are always inserted consecutively
	spikeRoutingTable.unique();

	// assign local neuron ids and local group ids for each local network in the order
	// IMPORTANT : NEURON ORGANIZATION/ARRANGEMENT MAP
	// <--- Excitatory --> | <-------- Inhibitory REGION ----------> | <-- Excitatory --> | <-- External -->
	// Excitatory-Regular | Inhibitory-Regular | Inhibitory-Poisson | Excitatory-Poisson | External Neurons
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			int availableNeuronId = 0;
			int localGroupId = 0;
			// order 0..3 lay out local groups by type; order 4 appends external (mirrored) groups
			for (int order = 0; order < 5; order++) {
				for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
					unsigned int type = groupConfigMap[grpIt->gGrpId].type;
					if (IS_EXCITATORY_TYPE(type) && (type & POISSON_NEURON) && order == 3 && grpIt->netId == netId) {
						availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
						localGroupId++;
					} else if (IS_INHIBITORY_TYPE(type) && (type & POISSON_NEURON) && order == 2 && grpIt->netId == netId) {
						availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
						localGroupId++;
					} else if (IS_EXCITATORY_TYPE(type) && !(type & POISSON_NEURON) && order == 0 && grpIt->netId == netId) {
						availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
						localGroupId++;
					} else if (IS_INHIBITORY_TYPE(type) && !(type & POISSON_NEURON) && order == 1 && grpIt->netId == netId) {
						availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
						localGroupId++;
					} else if (order == 4 && grpIt->netId != netId) {
						availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
						localGroupId++;
					}
				}
			}
			// every assigned neuron and every group in the partition list must have been consumed
			assert(availableNeuronId == numAssignedNeurons[netId]);
			assert(localGroupId == groupPartitionLists[netId].size());
		}
	}


	// generate connections among groups according to group and connect configs
	// update ConnectConfig::numberOfConnections
	// update GroupConfig::numPostSynapses, GroupConfig::numPreSynapses
	if (loadSimFID == NULL) {
		connectNetwork();
	} else {
		KERNEL_INFO("Load Simulation");
		loadSimulation_internal(false); // true or false doesn't matter here
	}

	collectGlobalNetworkConfigP();

	// print group and connection overview
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			KERNEL_INFO("\n+ Local Network (%d)", netId);
			KERNEL_INFO("|-+ Group List:");
			for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++)
				printGroupInfo(netId, grpIt);
		}

		if (!localConnectLists[netId].empty() || !externalConnectLists[netId].empty()) {
			KERNEL_INFO("|-+ Connection List:");
			for (std::list<ConnectConfig>::iterator connIt = localConnectLists[netId].begin(); connIt != localConnectLists[netId].end(); connIt++)
				printConnectionInfo(netId, connIt);

			for (std::list<ConnectConfig>::iterator connIt = externalConnectLists[netId].begin(); connIt != externalConnectLists[netId].end(); connIt++)
				printConnectionInfo(netId, connIt);
		}
	}

	// print spike routing table
	printSikeRoutingInfo();

	snnState = PARTITIONED_SNN;
}
5275 
5276 int SNN::loadSimulation_internal(bool onlyPlastic) {
5277  // TSC: so that we can restore the file position later...
5278  // MB: not sure why though...
5279  long file_position = ftell(loadSimFID);
5280 
5281  int tmpInt;
5282  float tmpFloat;
5283 
5284  bool readErr = false; // keep track of reading errors
5285  size_t result;
5286 
5287 
5288  // ------- read header ----------------
5289 
5290  fseek(loadSimFID, 0, SEEK_SET);
5291 
5292  // read file signature
5293  result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
5294  readErr |= (result!=1);
5295  if (tmpInt != 294338571) {
5296  KERNEL_ERROR("loadSimulation: Unknown file signature. This does not seem to be a "
5297  "simulation file created with CARLsim::saveSimulation.");
5298  exitSimulation(-1);
5299  }
5300 
5301  // read file version number
5302  result = fread(&tmpFloat, sizeof(float), 1, loadSimFID);
5303  readErr |= (result!=1);
5304  if (tmpFloat > 0.3f) {
5305  KERNEL_ERROR("loadSimulation: Unsupported version number (%f)",tmpFloat);
5306  exitSimulation(-1);
5307  }
5308 
5309  // read simulation time
5310  result = fread(&tmpFloat, sizeof(float), 1, loadSimFID);
5311  readErr |= (result!=1);
5312 
5313  // read execution time
5314  result = fread(&tmpFloat, sizeof(float), 1, loadSimFID);
5315  readErr |= (result!=1);
5316 
5317  // read number of neurons
5318  result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
5319  readErr |= (result!=1);
5320  if (tmpInt != glbNetworkConfig.numN) {
5321  KERNEL_ERROR("loadSimulation: Number of neurons in file (%d) and simulation (%d) don't match.",
5322  tmpInt, glbNetworkConfig.numN);
5323  exitSimulation(-1);
5324  }
5325 
5326  // skip save and read pre-synapses & post-synapses in CARLsim5 since they are now netID based
5327  // read number of pre-synapses
5328  // result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
5329  // readErr |= (result!=1);
5330  // if (numPreSynNet != tmpInt) {
5331  // KERNEL_ERROR("loadSimulation: numPreSynNet in file (%d) and simulation (%d) don't match.",
5332  // tmpInt, numPreSynNet);
5333  // exitSimulation(-1);
5334  // }
5335 
5337  //result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
5338  //readErr |= (result!=1);
5339  //if (numPostSynNet != tmpInt) {
5340  // KERNEL_ERROR("loadSimulation: numPostSynNet in file (%d) and simulation (%d) don't match.",
5341  // tmpInt, numPostSynNet);
5342  // exitSimulation(-1);
5343  //}
5344 
5345  // read number of groups
5346  result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
5347  readErr |= (result!=1);
5348  if (tmpInt != numGroups) {
5349  KERNEL_ERROR("loadSimulation: Number of groups in file (%d) and simulation (%d) don't match.",
5350  tmpInt, numGroups);
5351  exitSimulation(-1);
5352  }
5353 
5354  // throw reading error instead of proceeding
5355  if (readErr) {
5356  fprintf(stderr,"loadSimulation: Error while reading file header");
5357  exitSimulation(-1);
5358  }
5359 
5360 
5361  // ------- read group information ----------------
5362  for (int g=0; g<numGroups; g++) {
5363  // read StartN
5364  result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
5365  readErr |= (result!=1);
5366  if (tmpInt != groupConfigMDMap[g].gStartN) {
5367  KERNEL_ERROR("loadSimulation: StartN in file (%d) and grpInfo (%d) for group %d don't match.",
5368  tmpInt, groupConfigMDMap[g].gStartN, g);
5369  exitSimulation(-1);
5370  }
5371 
5372  // read EndN
5373  result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
5374  readErr |= (result!=1);
5375  if (tmpInt != groupConfigMDMap[g].gEndN) {
5376  KERNEL_ERROR("loadSimulation: EndN in file (%d) and grpInfo (%d) for group %d don't match.",
5377  tmpInt, groupConfigMDMap[g].gEndN, g);
5378  exitSimulation(-1);
5379  }
5380 
5381  // read SizeX
5382  result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
5383  readErr |= (result!=1);
5384  if (tmpInt != groupConfigMap[g].grid.numX) {
5385  KERNEL_ERROR("loadSimulation: numX in file (%d) and grpInfo (%d) for group %d don't match.",
5386  tmpInt, groupConfigMap[g].grid.numX, g);
5387  exitSimulation(-1);
5388  }
5389 
5390 
5391  // read SizeY
5392  result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
5393  readErr |= (result!=1);
5394  if (tmpInt != groupConfigMap[g].grid.numY) {
5395  KERNEL_ERROR("loadSimulation: numY in file (%d) and grpInfo (%d) for group %d don't match.",
5396  tmpInt, groupConfigMap[g].grid.numY, g);
5397  exitSimulation(-1);
5398  }
5399 
5400 
5401  // read SizeZ
5402  result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
5403  readErr |= (result!=1);
5404  if (tmpInt != groupConfigMap[g].grid.numZ) {
5405  KERNEL_ERROR("loadSimulation: numZ in file (%d) and grpInfo (%d) for group %d don't match.",
5406  tmpInt, groupConfigMap[g].grid.numZ, g);
5407  exitSimulation(-1);
5408  }
5409 
5410 
5411  // read group name
5412  char name[100];
5413  result = fread(name, sizeof(char), 100, loadSimFID);
5414  readErr |= (result!=100);
5415  if (strcmp(name,groupConfigMap[g].grpName.c_str()) != 0) {
5416  KERNEL_ERROR("loadSimulation: Group names in file (%s) and grpInfo (%s) don't match.", name,
5417  groupConfigMap[g].grpName.c_str());
5418  exitSimulation(-1);
5419  }
5420  }
5421 
5422  if (readErr) {
5423  KERNEL_ERROR("loadSimulation: Error while reading group info");
5424  exitSimulation(-1);
5425  }
5426  // // read weight
5427  // result = fread(&weight, sizeof(float), 1, loadSimFID);
5428  // readErr |= (result!=1);
5429 
5430  // short int gIDpre = managerRuntimeData.grpIds[nIDpre];
5431  // if (IS_INHIBITORY_TYPE(groupConfigs[0][gIDpre].Type) && (weight>0)
5432  // || !IS_INHIBITORY_TYPE(groupConfigs[0][gIDpre].Type) && (weight<0)) {
5433  // KERNEL_ERROR("loadSimulation: Sign of weight value (%s) does not match neuron type (%s)",
5434  // ((weight>=0.0f)?"plus":"minus"),
5435  // (IS_INHIBITORY_TYPE(groupConfigs[0][gIDpre].Type)?"inhibitory":"excitatory"));
5436  // exitSimulation(-1);
5437  // }
5438 
5439  // // read max weight
5440  // result = fread(&maxWeight, sizeof(float), 1, loadSimFID);
5441  // readErr |= (result!=1);
5442  // if (IS_INHIBITORY_TYPE(groupConfigs[0][gIDpre].Type) && (maxWeight>=0)
5443  // || !IS_INHIBITORY_TYPE(groupConfigs[0][gIDpre].Type) && (maxWeight<=0)) {
5444  // KERNEL_ERROR("loadSimulation: Sign of maxWeight value (%s) does not match neuron type (%s)",
5445  // ((maxWeight>=0.0f)?"plus":"minus"),
5446  // (IS_INHIBITORY_TYPE(groupConfigs[0][gIDpre].Type)?"inhibitory":"excitatory"));
5447  // exitSimulation(-1);
5448  // }
5449 
5450  // ------- read synapse information ----------------
5451  int net_count = 0;
5452  result = fread(&net_count, sizeof(int), 1, loadSimFID);
5453  readErr |= (result!=1);
5454 
5455  for (int i = 0; i < net_count; i++) {
5456  int synapse_count = 0;
5457  result = fread(&synapse_count, sizeof(int), 1, loadSimFID);
5458  for (int j = 0; j < synapse_count; j++) {
5459  int gGrpIdPre;
5460  int gGrpIdPost;
5461  int grpNIdPre;
5462  int grpNIdPost;
5463  int connId;
5464  float weight;
5465  float maxWeight;
5466  int delay;
5467 
5468  // read gGrpIdPre
5469  result = fread(&gGrpIdPre, sizeof(int), 1, loadSimFID);
5470  readErr != (result!=1);
5471 
5472  // read gGrpIdPost
5473  result = fread(&gGrpIdPost, sizeof(int), 1, loadSimFID);
5474  readErr != (result!=1);
5475 
5476  // read grpNIdPre
5477  result = fread(&grpNIdPre, sizeof(int), 1, loadSimFID);
5478  readErr != (result!=1);
5479 
5480  // read grpNIdPost
5481  result = fread(&grpNIdPost, sizeof(int), 1, loadSimFID);
5482  readErr != (result!=1);
5483 
5484  // read connId
5485  result = fread(&connId, sizeof(int), 1, loadSimFID);
5486  readErr != (result!=1);
5487 
5488  // read weight
5489  result = fread(&weight, sizeof(float), 1, loadSimFID);
5490  readErr != (result!=1);
5491 
5492  // read maxWeight
5493  result = fread(&maxWeight, sizeof(float), 1, loadSimFID);
5494  readErr != (result!=1);
5495 
5496  // read delay
5497  result = fread(&delay, sizeof(int), 1, loadSimFID);
5498  readErr != (result!=1);
5499 
5500  // check connection
5501  if (connectConfigMap[connId].grpSrc != gGrpIdPre) {
5502  KERNEL_ERROR("loadSimulation: source group in file (%d) and in simulation (%d) for connection %d don't match.",
5503  gGrpIdPre , connectConfigMap[connId].grpSrc, connId);
5504  exitSimulation(-1);
5505  }
5506 
5507  if (connectConfigMap[connId].grpDest != gGrpIdPost) {
5508  KERNEL_ERROR("loadSimulation: dest group in file (%d) and in simulation (%d) for connection %d don't match.",
5509  gGrpIdPost , connectConfigMap[connId].grpDest, connId);
5510  exitSimulation(-1);
5511  }
5512 
5513  // connect synapse
5514  // find netid for two groups
5515  int netIdPre = groupConfigMDMap[gGrpIdPre].netId;
5516  int netIdPost = groupConfigMDMap[gGrpIdPost].netId;
5517  bool isExternal = (netIdPre != netIdPost);
5518 
5519  // find global neuron id for two neurons
5520  int globalNIdPre = groupConfigMDMap[gGrpIdPre].gStartN + grpNIdPre;
5521  int globalNIdPost = groupConfigMDMap[gGrpIdPost].gStartN + grpNIdPost;
5522 
5523  bool connected =false;
5524  if (!isExternal) {
5525  for (std::list<ConnectConfig>::iterator connIt = localConnectLists[netIdPre].begin(); connIt != localConnectLists[netIdPre].end() && (!connected); connIt++) {
5526  if (connIt->connId == connId) {
5527  // connect two neurons
5528  connectNeurons(netIdPre, gGrpIdPre, gGrpIdPost, globalNIdPre, globalNIdPost, connId, weight, maxWeight, delay, -1);
5529  connected = true;
5530  // update connection information
5531  connIt->numberOfConnections++;
5532  std::list<GroupConfigMD>::iterator grpIt;
5533 
5534  // fix me maybe: numPostSynapses and numPreSynpases could also be loaded from saved information directly to save time
5535  // the current implementation is a safer one
5536  GroupConfigMD targetGrp;
5537 
5538  targetGrp.gGrpId = gGrpIdPre;
5539  grpIt = std::find(groupPartitionLists[netIdPre].begin(), groupPartitionLists[netIdPre].end(), targetGrp);
5540  assert(grpIt != groupPartitionLists[netIdPre].end());
5541  grpIt->numPostSynapses += 1;
5542 
5543  targetGrp.gGrpId = gGrpIdPost;
5544  grpIt = std::find(groupPartitionLists[netIdPre].begin(), groupPartitionLists[netIdPre].end(), targetGrp);
5545  assert(grpIt != groupPartitionLists[netIdPost].end());
5546  grpIt->numPreSynapses += 1;
5547  }
5548  }
5549  } else {
5550  for (std::list<ConnectConfig>::iterator connIt = externalConnectLists[netIdPre].begin(); connIt != externalConnectLists[netIdPre].end() && (!connected); connIt++) {
5551  if (connIt->connId == connId) {
5552  // connect two neurons
5553  connectNeurons(netIdPre, gGrpIdPre, gGrpIdPost, globalNIdPre, globalNIdPost, connId, weight, maxWeight, delay, netIdPost);
5554  connected = true;
5555  // update connection information
5556  connIt->numberOfConnections++;
5557 
5558  // fix me maybe: numPostSynapses and numPreSynpases could also be loaded from saved information directly to save time
5559  // the current implementation is a safer one
5560  GroupConfigMD targetGrp;
5561  std::list<GroupConfigMD>::iterator grpIt;
5562 
5563  targetGrp.gGrpId = gGrpIdPre;
5564  grpIt = std::find(groupPartitionLists[netIdPre].begin(), groupPartitionLists[netIdPre].end(), targetGrp);
5565  assert(grpIt != groupPartitionLists[netIdPre].end());
5566  grpIt->numPostSynapses += 1;
5567 
5568  targetGrp.gGrpId = gGrpIdPost;
5569  grpIt = std::find(groupPartitionLists[netIdPre].begin(), groupPartitionLists[netIdPre].end(), targetGrp);
5570  assert(grpIt != groupPartitionLists[netIdPost].end());
5571  grpIt->numPreSynapses += 1;
5572 
5573  // update group information in another network
5574  targetGrp.gGrpId = gGrpIdPre;
5575  grpIt = std::find(groupPartitionLists[netIdPost].begin(), groupPartitionLists[netIdPost].end(), targetGrp);
5576  assert(grpIt != groupPartitionLists[netIdPost].end());
5577  grpIt->numPostSynapses += 1;
5578 
5579  targetGrp.gGrpId = gGrpIdPost;
5580  grpIt = std::find(groupPartitionLists[netIdPost].begin(), groupPartitionLists[netIdPost].end(), targetGrp);
5581  assert(grpIt != groupPartitionLists[netIdPost].end());
5582  grpIt->numPreSynapses += 1;
5583  }
5584  }
5585  }
5586  }
5587  }
5588 
5589  fseek(loadSimFID,file_position,SEEK_SET);
5590 
5591  return 0;
5592 }
5593 
// Builds all runtime data structures from the partitioned network and
// allocates them on the appropriate backend (CPU or GPU) for each local
// network; leaves the SNN in the EXECUTABLE_SNN state.
void SNN::generateRuntimeSNN() {
	// 1. generate configurations for the simulation
	// generate (copy) group configs from groupPartitionLists[]
	generateRuntimeGroupConfigs();

	// generate (copy) connection configs from localConnectLists[] and externalConnectLists[]
	generateRuntimeConnectConfigs();

	// generate local network configs and acquire maximum size of runtime data
	generateRuntimeNetworkConfigs();

	// 2. allocate space of runtime data used by the manager
	// - allocate firingTableD1, firingTableD2, timeTableD1, timeTableD2
	// - reset firingTableD1, firingTableD2, timeTableD1, timeTableD2
	allocateManagerSpikeTables();
	// - allocate voltage, recovery, Izh_a, Izh_b, Izh_c, Izh_d, current, extCurrent, gAMPA, gNMDA, gGABAa, gGABAb
	//   lastSpikeTime, nSpikeCnt, stpu, stpx, Npre, Npre_plastic, Npost, cumulativePost, cumulativePre,
	//   postSynapticIds, postDelayInfo, wt, wtChange, synSpikeTime, maxSynWt, preSynapticIds, grpIds, connIdsPreIdx,
	//   grpDA, grp5HT, grpACh, grpNE, grpDABuffer, grp5HTBuffer, grpAChBuffer, grpNEBuffer, mulSynFast, mulSynSlow
	// - reset all above
	allocateManagerRuntimeData();

	// 3. initialize manager runtime data according to partitions (i.e., local networks)
	// 4a. allocate appropriate memory space (e.g., main memory (CPU) or device memory (GPU)).
	// 4b. load (copy) them to appropriate memory space for execution
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			KERNEL_INFO("");
			// netIds below CPU_RUNTIME_BASE are GPU runtimes; the rest are CPU runtimes
			if (netId < CPU_RUNTIME_BASE) {
				KERNEL_INFO("***************** Initializing GPU %d Runtime *************************", netId);
			} else {
				KERNEL_INFO("***************** Initializing CPU %d Runtime *************************", (netId - CPU_RUNTIME_BASE));
			}
			// build the runtime data according to local network, group, connection configurations

			// generate runtime data for each group
			for(int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
				// local poisson neurons
				if (groupConfigs[netId][lGrpId].netId == netId && (groupConfigs[netId][lGrpId].Type & POISSON_NEURON)) {
					// - init lastSpikeTime
					// - reset avgFiring, stpu, stpx
					// - init stpx
					generatePoissonGroupRuntime(netId, lGrpId);
				}
				// local regular neurons
				if (groupConfigs[netId][lGrpId].netId == netId && !(groupConfigs[netId][lGrpId].Type & POISSON_NEURON)) {
					// - init grpDA, grp5HT, grpACh, grpNE
					// - init Izh_a, Izh_b, Izh_c, Izh_d, voltage, recovery, stpu, stpx
					// - init baseFiring, avgFiring
					// - init lastSpikeTime
					generateGroupRuntime(netId, lGrpId);
				}
			}

			// - init grpIds: map every assigned local neuron id to its local group id
			for (int lNId = 0; lNId < networkConfigs[netId].numNAssigned; lNId++) {
				managerRuntimeData.grpIds[lNId] = -1;
				for(int lGrpId = 0; lGrpId < networkConfigs[netId].numGroupsAssigned; lGrpId++) {
					if (lNId >= groupConfigs[netId][lGrpId].lStartN && lNId <= groupConfigs[netId][lGrpId].lEndN) {
						managerRuntimeData.grpIds[lNId] = (short int)lGrpId;
						break;
					}
				}
				// every assigned neuron must belong to exactly one local group
				assert(managerRuntimeData.grpIds[lNId] != -1);
			}

			// - init mulSynFast, mulSynSlow
			// - init Npre, Npre_plastic, Npost, cumulativePre, cumulativePost, preSynapticIds, postSynapticIds, postDelayInfo
			// - init wt, maxSynWt
			generateConnectionRuntime(netId);

			generateCompConnectionRuntime(netId);

			// - reset current
			resetCurrent(netId);
			// - reset conductance
			resetConductances(netId);

			// - reset wtChange
			// - init synSpikeTime
			resetSynapse(netId, false);

			// allocate/copy runtime data on the target backend (CPU or GPU)
			allocateSNN(netId);
		}
	}

	// count allocated CPU/GPU runtimes
	numGPUs = 0; numCores = 0;
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (netId < CPU_RUNTIME_BASE && runtimeData[netId].allocated)
			numGPUs++;
		if (netId >= CPU_RUNTIME_BASE && runtimeData[netId].allocated)
			numCores++;
	}

	// 5. declare the spiking neural network is executable
	snnState = EXECUTABLE_SNN;
}
5692 
5693 void SNN::resetConductances(int netId) {
5694  if (networkConfigs[netId].sim_with_conductances) {
5695  memset(managerRuntimeData.gAMPA, 0, sizeof(float) * networkConfigs[netId].numNReg);
5696  if (networkConfigs[netId].sim_with_NMDA_rise) {
5697  memset(managerRuntimeData.gNMDA_r, 0, sizeof(float) * networkConfigs[netId].numNReg);
5698  memset(managerRuntimeData.gNMDA_d, 0, sizeof(float) * networkConfigs[netId].numNReg);
5699  } else {
5700  memset(managerRuntimeData.gNMDA, 0, sizeof(float) * networkConfigs[netId].numNReg);
5701  }
5702  memset(managerRuntimeData.gGABAa, 0, sizeof(float) * networkConfigs[netId].numNReg);
5703  if (networkConfigs[netId].sim_with_GABAb_rise) {
5704  memset(managerRuntimeData.gGABAb_r, 0, sizeof(float) * networkConfigs[netId].numNReg);
5705  memset(managerRuntimeData.gGABAb_d, 0, sizeof(float) * networkConfigs[netId].numNReg);
5706  } else {
5707  memset(managerRuntimeData.gGABAb, 0, sizeof(float) * networkConfigs[netId].numNReg);
5708  }
5709  }
5710 }
5711 
5712 void SNN::resetCurrent(int netId) {
5713  assert(managerRuntimeData.current != NULL);
5714  memset(managerRuntimeData.current, 0, sizeof(float) * networkConfigs[netId].numNReg);
5715 }
5716 
5717 // FIXME: unused function
5718 void SNN::resetFiringInformation() {
5719  // Reset firing tables and time tables to default values..
5720 
5721  // reset various times...
5722  simTimeMs = 0;
5723  simTimeSec = 0;
5724  simTime = 0;
5725 
5726  // reset the propogation Buffer.
5727  resetPropogationBuffer();
5728  // reset Timing Table..
5729  resetTimeTable();
5730 }
5731 
5732 void SNN::resetTiming() {
5733  prevExecutionTime = cumExecutionTime;
5734  executionTime = 0.0f;
5735 }
5736 
5737 void SNN::resetNeuromodulator(int netId, int lGrpId) {
5738  managerRuntimeData.grpDA[lGrpId] = groupConfigs[netId][lGrpId].baseDP;
5739  managerRuntimeData.grp5HT[lGrpId] = groupConfigs[netId][lGrpId].base5HT;
5740  managerRuntimeData.grpACh[lGrpId] = groupConfigs[netId][lGrpId].baseACh;
5741  managerRuntimeData.grpNE[lGrpId] = groupConfigs[netId][lGrpId].baseNE;
5742 }
5743 
void SNN::resetNeuron(int netId, int lGrpId, int lNId) {
	// Re-initialize the runtime state of a single regular (non-Poisson) neuron:
	// model parameters (with per-neuron jitter), membrane state, homeostasis
	// baselines, last-spike time, and short-term plasticity variables.
	// \param netId  local network id
	// \param lGrpId local group id within that network
	// \param lNId   local neuron id (must index a regular neuron)
	int gGrpId = groupConfigs[netId][lGrpId].gGrpId; // get global group id
	assert(lNId < networkConfigs[netId].numNReg);

	// abort if the user never configured the Izhikevich parameters of this group
	if (groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a == -1 && groupConfigMap[gGrpId].isLIF == 0) {
		KERNEL_ERROR("setNeuronParameters must be called for group %s (G:%d,L:%d)",groupConfigMap[gGrpId].grpName.c_str(), gGrpId, lGrpId);
		exitSimulation(1);
	}

	// same check for LIF groups
	if (groupConfigMap[gGrpId].neuralDynamicsConfig.lif_tau_m == -1 && groupConfigMap[gGrpId].isLIF == 1) {
		KERNEL_ERROR("setNeuronParametersLIF must be called for group %s (G:%d,L:%d)",groupConfigMap[gGrpId].grpName.c_str(), gGrpId, lGrpId);
		exitSimulation(1);
	}

	// Draw per-neuron Izhikevich parameters as mean + sd * drand48().
	// NOTE(review): drand48() is uniform on [0,1), so the "_sd" fields act as a
	// one-sided uniform jitter range rather than a Gaussian sd — confirm intended.
	managerRuntimeData.Izh_a[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a_sd * (float)drand48();
	managerRuntimeData.Izh_b[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b_sd * (float)drand48();
	managerRuntimeData.Izh_c[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c_sd * (float)drand48();
	managerRuntimeData.Izh_d[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d_sd * (float)drand48();
	managerRuntimeData.Izh_C[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_C + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_C_sd * (float)drand48();
	managerRuntimeData.Izh_k[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_k + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_k_sd * (float)drand48();
	managerRuntimeData.Izh_vr[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vr + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vr_sd * (float)drand48();
	managerRuntimeData.Izh_vt[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vt + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vt_sd * (float)drand48();
	managerRuntimeData.Izh_vpeak[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vpeak + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vpeak_sd * (float)drand48();
	// LIF parameters are copied verbatim (no jitter)
	managerRuntimeData.lif_tau_m[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.lif_tau_m;
	managerRuntimeData.lif_tau_ref[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.lif_tau_ref;
	managerRuntimeData.lif_tau_ref_c[lNId] = 0; // refractory countdown starts expired
	managerRuntimeData.lif_vTh[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.lif_vTh;
	managerRuntimeData.lif_vReset[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.lif_vReset;

	// calculate gain and bias for the lif neuron
	if (groupConfigs[netId][lGrpId].isLIF){
		// gain and bias of the LIF neuron are calculated based on membrane resistance,
		// drawn uniformly from [minRmem, maxRmem)
		float rmRange = (float)(groupConfigMap[gGrpId].neuralDynamicsConfig.lif_maxRmem - groupConfigMap[gGrpId].neuralDynamicsConfig.lif_minRmem);
		float minRmem = (float)groupConfigMap[gGrpId].neuralDynamicsConfig.lif_minRmem;
		managerRuntimeData.lif_bias[lNId] = 0.0f;
		managerRuntimeData.lif_gain[lNId] = minRmem + rmRange * (float)drand48();
	}

	// initial membrane potential: vReset (LIF), vr (9-param Izh), or c (4-param Izh);
	// recovery u starts at 0 for the 9-param model, b*v otherwise
	managerRuntimeData.nextVoltage[lNId] = managerRuntimeData.voltage[lNId] = groupConfigs[netId][lGrpId].isLIF ? managerRuntimeData.lif_vReset[lNId] : (groupConfigs[netId][lGrpId].withParamModel_9 ? managerRuntimeData.Izh_vr[lNId] : managerRuntimeData.Izh_c[lNId]);
	managerRuntimeData.recovery[lNId] = groupConfigs[netId][lGrpId].withParamModel_9 ? 0.0f : managerRuntimeData.Izh_b[lNId] * managerRuntimeData.voltage[lNId];

	if (groupConfigs[netId][lGrpId].WithHomeostasis) {
		// set the baseFiring with some standard deviation.
		if (drand48() > 0.5) {
			// jitter upward: -log(uniform) is exponentially distributed and positive
			managerRuntimeData.baseFiring[lNId] = groupConfigMap[gGrpId].homeoConfig.baseFiring + groupConfigMap[gGrpId].homeoConfig.baseFiringSD * -log(drand48());
		} else {
			// jitter downward; clamp so the target rate stays positive
			managerRuntimeData.baseFiring[lNId] = groupConfigMap[gGrpId].homeoConfig.baseFiring - groupConfigMap[gGrpId].homeoConfig.baseFiringSD * -log(drand48());
			if(managerRuntimeData.baseFiring[lNId] < 0.1f) managerRuntimeData.baseFiring[lNId] = 0.1f;
		}

		// a zero configured baseFiring disables the homeostatic target entirely
		if (groupConfigMap[gGrpId].homeoConfig.baseFiring != 0.0f) {
			managerRuntimeData.avgFiring[lNId] = managerRuntimeData.baseFiring[lNId];
		} else {
			managerRuntimeData.baseFiring[lNId] = 0.0f;
			managerRuntimeData.avgFiring[lNId] = 0.0f;
		}
	}

	managerRuntimeData.lastSpikeTime[lNId] = MAX_SIMULATION_TIME; // i.e., "never fired"

	if(groupConfigs[netId][lGrpId].WithSTP) {
		// one STP (u, x) pair per delay slot
		for (int j = 0; j < networkConfigs[netId].maxDelay + 1; j++) { // is of size maxDelay_+1
			int index = STP_BUF_POS(lNId, j, networkConfigs[netId].maxDelay);
			managerRuntimeData.stpu[index] = 0.0f;
			managerRuntimeData.stpx[index] = 1.0f; // x starts fully recovered
		}
	}
}
5815 
5816 void SNN::resetMonitors(bool deallocate) {
5817  // order is important! monitor objects might point to SNN or CARLsim,
5818  // need to deallocate them first
5819 
5820 
5821  // -------------- DEALLOCATE MONITOR OBJECTS ---------------------- //
5822 
5823  // delete all SpikeMonitor objects
5824  // don't kill SpikeMonitorCore objects, they will get killed automatically
5825  for (int i=0; i<numSpikeMonitor; i++) {
5826  if (spikeMonList[i]!=NULL && deallocate) delete spikeMonList[i];
5827  spikeMonList[i]=NULL;
5828  }
5829 
5830  // delete all NeuronMonitor objects
5831  // don't kill NeuronMonitorCore objects, they will get killed automatically
5832  for (int i = 0; i<numNeuronMonitor; i++) {
5833  if (neuronMonList[i] != NULL && deallocate) delete neuronMonList[i];
5834  neuronMonList[i] = NULL;
5835  }
5836 
5837  // delete all GroupMonitor objects
5838  // don't kill GroupMonitorCore objects, they will get killed automatically
5839  for (int i=0; i<numGroupMonitor; i++) {
5840  if (groupMonList[i]!=NULL && deallocate) delete groupMonList[i];
5841  groupMonList[i]=NULL;
5842  }
5843 
5844  // delete all ConnectionMonitor objects
5845  // don't kill ConnectionMonitorCore objects, they will get killed automatically
5846  for (int i=0; i<numConnectionMonitor; i++) {
5847  if (connMonList[i]!=NULL && deallocate) delete connMonList[i];
5848  connMonList[i]=NULL;
5849  }
5850 }
5851 
5852 void SNN::resetGroupConfigs(bool deallocate) {
5853  // clear all existing group configurations
5854  if (deallocate) groupConfigMap.clear();
5855 }
5856 
5857 void SNN::resetConnectionConfigs(bool deallocate) {
5858  // clear all existing connection configurations
5859  if (deallocate) connectConfigMap.clear();
5860 }
5861 
void SNN::deleteManagerRuntimeData() {
	// Release every host-side (manager) runtime buffer and null the pointers,
	// so the function is safe to call repeatedly. Arrays allocated with new[]
	// are freed with delete[]; the spike buffer object itself with delete.
	if (spikeBuf!=NULL) delete spikeBuf;
	if (managerRuntimeData.spikeGenBits!=NULL) delete[] managerRuntimeData.spikeGenBits;
	spikeBuf=NULL; managerRuntimeData.spikeGenBits=NULL;

	// clear data (i.e., concentration of neuromodulator) of groups
	if (managerRuntimeData.grpDA != NULL) delete [] managerRuntimeData.grpDA;
	if (managerRuntimeData.grp5HT != NULL) delete [] managerRuntimeData.grp5HT;
	if (managerRuntimeData.grpACh != NULL) delete [] managerRuntimeData.grpACh;
	if (managerRuntimeData.grpNE != NULL) delete [] managerRuntimeData.grpNE;
	managerRuntimeData.grpDA = NULL;
	managerRuntimeData.grp5HT = NULL;
	managerRuntimeData.grpACh = NULL;
	managerRuntimeData.grpNE = NULL;

	// clear assistive data buffer for group monitor
	if (managerRuntimeData.grpDABuffer != NULL) delete [] managerRuntimeData.grpDABuffer;
	if (managerRuntimeData.grp5HTBuffer != NULL) delete [] managerRuntimeData.grp5HTBuffer;
	if (managerRuntimeData.grpAChBuffer != NULL) delete [] managerRuntimeData.grpAChBuffer;
	if (managerRuntimeData.grpNEBuffer != NULL) delete [] managerRuntimeData.grpNEBuffer;
	managerRuntimeData.grpDABuffer = NULL; managerRuntimeData.grp5HTBuffer = NULL;
	managerRuntimeData.grpAChBuffer = NULL; managerRuntimeData.grpNEBuffer = NULL;

	// -------------- DEALLOCATE CORE OBJECTS ---------------------- //

	// per-neuron state: membrane potential, recovery, currents, monitor buffers
	if (managerRuntimeData.voltage!=NULL) delete[] managerRuntimeData.voltage;
	if (managerRuntimeData.nextVoltage != NULL) delete[] managerRuntimeData.nextVoltage;
	if (managerRuntimeData.recovery!=NULL) delete[] managerRuntimeData.recovery;
	if (managerRuntimeData.current!=NULL) delete[] managerRuntimeData.current;
	if (managerRuntimeData.extCurrent!=NULL) delete[] managerRuntimeData.extCurrent;
	if (managerRuntimeData.totalCurrent != NULL) delete[] managerRuntimeData.totalCurrent;
	if (managerRuntimeData.curSpike != NULL) delete[] managerRuntimeData.curSpike;
	if (managerRuntimeData.nVBuffer != NULL) delete[] managerRuntimeData.nVBuffer;
	if (managerRuntimeData.nUBuffer != NULL) delete[] managerRuntimeData.nUBuffer;
	if (managerRuntimeData.nIBuffer != NULL) delete[] managerRuntimeData.nIBuffer;
	managerRuntimeData.voltage=NULL; managerRuntimeData.recovery=NULL; managerRuntimeData.current=NULL; managerRuntimeData.extCurrent=NULL;
	managerRuntimeData.nextVoltage = NULL; managerRuntimeData.totalCurrent = NULL; managerRuntimeData.curSpike = NULL;
	managerRuntimeData.nVBuffer = NULL; managerRuntimeData.nUBuffer = NULL; managerRuntimeData.nIBuffer = NULL;

	// Izhikevich model parameters (4- and 9-parameter variants)
	if (managerRuntimeData.Izh_a!=NULL) delete[] managerRuntimeData.Izh_a;
	if (managerRuntimeData.Izh_b!=NULL) delete[] managerRuntimeData.Izh_b;
	if (managerRuntimeData.Izh_c!=NULL) delete[] managerRuntimeData.Izh_c;
	if (managerRuntimeData.Izh_d!=NULL) delete[] managerRuntimeData.Izh_d;
	if (managerRuntimeData.Izh_C!=NULL) delete[] managerRuntimeData.Izh_C;
	if (managerRuntimeData.Izh_k!=NULL) delete[] managerRuntimeData.Izh_k;
	if (managerRuntimeData.Izh_vr!=NULL) delete[] managerRuntimeData.Izh_vr;
	if (managerRuntimeData.Izh_vt!=NULL) delete[] managerRuntimeData.Izh_vt;
	if (managerRuntimeData.Izh_vpeak!=NULL) delete[] managerRuntimeData.Izh_vpeak;
	managerRuntimeData.Izh_a=NULL; managerRuntimeData.Izh_b=NULL; managerRuntimeData.Izh_c=NULL; managerRuntimeData.Izh_d=NULL;
	managerRuntimeData.Izh_C = NULL; managerRuntimeData.Izh_k = NULL; managerRuntimeData.Izh_vr = NULL; managerRuntimeData.Izh_vt = NULL; managerRuntimeData.Izh_vpeak = NULL;

	// LIF model parameters
	if (managerRuntimeData.lif_tau_m!=NULL) delete[] managerRuntimeData.lif_tau_m;
	if (managerRuntimeData.lif_tau_ref!=NULL) delete[] managerRuntimeData.lif_tau_ref;
	if (managerRuntimeData.lif_tau_ref_c!=NULL) delete[] managerRuntimeData.lif_tau_ref_c;
	if (managerRuntimeData.lif_vTh!=NULL) delete[] managerRuntimeData.lif_vTh;
	if (managerRuntimeData.lif_vReset!=NULL) delete[] managerRuntimeData.lif_vReset;
	if (managerRuntimeData.lif_gain!=NULL) delete[] managerRuntimeData.lif_gain;
	if (managerRuntimeData.lif_bias!=NULL) delete[] managerRuntimeData.lif_bias;
	managerRuntimeData.lif_tau_m=NULL; managerRuntimeData.lif_tau_ref=NULL; managerRuntimeData.lif_vTh=NULL;
	managerRuntimeData.lif_vReset=NULL; managerRuntimeData.lif_gain=NULL; managerRuntimeData.lif_bias=NULL;
	managerRuntimeData.lif_tau_ref_c=NULL;

	// synaptic connectivity counts and prefix-sum offsets
	if (managerRuntimeData.Npre!=NULL) delete[] managerRuntimeData.Npre;
	if (managerRuntimeData.Npre_plastic!=NULL) delete[] managerRuntimeData.Npre_plastic;
	if (managerRuntimeData.Npost!=NULL) delete[] managerRuntimeData.Npost;
	managerRuntimeData.Npre=NULL; managerRuntimeData.Npre_plastic=NULL; managerRuntimeData.Npost=NULL;

	if (managerRuntimeData.cumulativePre!=NULL) delete[] managerRuntimeData.cumulativePre;
	if (managerRuntimeData.cumulativePost!=NULL) delete[] managerRuntimeData.cumulativePost;
	managerRuntimeData.cumulativePre=NULL; managerRuntimeData.cumulativePost=NULL;

	// conductance traces (COBA mode)
	if (managerRuntimeData.gAMPA!=NULL) delete[] managerRuntimeData.gAMPA;
	if (managerRuntimeData.gNMDA!=NULL) delete[] managerRuntimeData.gNMDA;
	if (managerRuntimeData.gNMDA_r!=NULL) delete[] managerRuntimeData.gNMDA_r;
	if (managerRuntimeData.gNMDA_d!=NULL) delete[] managerRuntimeData.gNMDA_d;
	if (managerRuntimeData.gGABAa!=NULL) delete[] managerRuntimeData.gGABAa;
	if (managerRuntimeData.gGABAb!=NULL) delete[] managerRuntimeData.gGABAb;
	if (managerRuntimeData.gGABAb_r!=NULL) delete[] managerRuntimeData.gGABAb_r;
	if (managerRuntimeData.gGABAb_d!=NULL) delete[] managerRuntimeData.gGABAb_d;
	managerRuntimeData.gAMPA=NULL; managerRuntimeData.gNMDA=NULL; managerRuntimeData.gNMDA_r=NULL; managerRuntimeData.gNMDA_d=NULL;
	managerRuntimeData.gGABAa=NULL; managerRuntimeData.gGABAb=NULL; managerRuntimeData.gGABAb_r=NULL; managerRuntimeData.gGABAb_d=NULL;

	// short-term plasticity state
	if (managerRuntimeData.stpu!=NULL) delete[] managerRuntimeData.stpu;
	if (managerRuntimeData.stpx!=NULL) delete[] managerRuntimeData.stpx;
	managerRuntimeData.stpu=NULL; managerRuntimeData.stpx=NULL;

	// homeostasis state
	if (managerRuntimeData.avgFiring!=NULL) delete[] managerRuntimeData.avgFiring;
	if (managerRuntimeData.baseFiring!=NULL) delete[] managerRuntimeData.baseFiring;
	managerRuntimeData.avgFiring=NULL; managerRuntimeData.baseFiring=NULL;

	// spike-time bookkeeping
	if (managerRuntimeData.lastSpikeTime!=NULL) delete[] managerRuntimeData.lastSpikeTime;
	if (managerRuntimeData.synSpikeTime !=NULL) delete[] managerRuntimeData.synSpikeTime;
	if (managerRuntimeData.nSpikeCnt!=NULL) delete[] managerRuntimeData.nSpikeCnt;
	managerRuntimeData.lastSpikeTime=NULL; managerRuntimeData.synSpikeTime=NULL; managerRuntimeData.nSpikeCnt=NULL;

	// packed synapse id / delay lookup tables
	if (managerRuntimeData.postDelayInfo!=NULL) delete[] managerRuntimeData.postDelayInfo;
	if (managerRuntimeData.preSynapticIds!=NULL) delete[] managerRuntimeData.preSynapticIds;
	if (managerRuntimeData.postSynapticIds!=NULL) delete[] managerRuntimeData.postSynapticIds;
	managerRuntimeData.postDelayInfo=NULL; managerRuntimeData.preSynapticIds=NULL; managerRuntimeData.postSynapticIds=NULL;

	// synaptic weights and their derivatives
	if (managerRuntimeData.wt!=NULL) delete[] managerRuntimeData.wt;
	if (managerRuntimeData.maxSynWt!=NULL) delete[] managerRuntimeData.maxSynWt;
	if (managerRuntimeData.wtChange !=NULL) delete[] managerRuntimeData.wtChange;
	managerRuntimeData.wt=NULL; managerRuntimeData.maxSynWt=NULL; managerRuntimeData.wtChange=NULL;

	// per-connection scaling factors and connection-id lookup
	if (mulSynFast!=NULL) delete[] mulSynFast;
	if (mulSynSlow!=NULL) delete[] mulSynSlow;
	if (managerRuntimeData.connIdsPreIdx!=NULL) delete[] managerRuntimeData.connIdsPreIdx;
	mulSynFast=NULL; mulSynSlow=NULL; managerRuntimeData.connIdsPreIdx=NULL;

	// neuron-to-group lookup
	if (managerRuntimeData.grpIds!=NULL) delete[] managerRuntimeData.grpIds;
	managerRuntimeData.grpIds=NULL;

	// firing time tables and firing tables (D1 = 1ms delay, D2 = longer delays)
	if (managerRuntimeData.timeTableD2 != NULL) delete [] managerRuntimeData.timeTableD2;
	if (managerRuntimeData.timeTableD1 != NULL) delete [] managerRuntimeData.timeTableD1;
	managerRuntimeData.timeTableD2 = NULL; managerRuntimeData.timeTableD1 = NULL;

	if (managerRuntimeData.firingTableD2!=NULL) delete[] managerRuntimeData.firingTableD2;
	if (managerRuntimeData.firingTableD1!=NULL) delete[] managerRuntimeData.firingTableD1;
	//if (managerRuntimeData.firingTableD2!=NULL) CUDA_CHECK_ERRORS(cudaFreeHost(managerRuntimeData.firingTableD2));
	//if (managerRuntimeData.firingTableD1!=NULL) CUDA_CHECK_ERRORS(cudaFreeHost(managerRuntimeData.firingTableD1));
	managerRuntimeData.firingTableD2 = NULL; managerRuntimeData.firingTableD1 = NULL;

	if (managerRuntimeData.extFiringTableD2!=NULL) delete[] managerRuntimeData.extFiringTableD2;
	if (managerRuntimeData.extFiringTableD1!=NULL) delete[] managerRuntimeData.extFiringTableD1;
	//if (managerRuntimeData.extFiringTableD2!=NULL) CUDA_CHECK_ERRORS(cudaFreeHost(managerRuntimeData.extFiringTableD2));
	//if (managerRuntimeData.extFiringTableD1!=NULL) CUDA_CHECK_ERRORS(cudaFreeHost(managerRuntimeData.extFiringTableD1));
	managerRuntimeData.extFiringTableD2 = NULL; managerRuntimeData.extFiringTableD1 = NULL;

	if (managerRuntimeData.extFiringTableEndIdxD1 != NULL) delete[] managerRuntimeData.extFiringTableEndIdxD1;
	if (managerRuntimeData.extFiringTableEndIdxD2 != NULL) delete[] managerRuntimeData.extFiringTableEndIdxD2;
	//if (managerRuntimeData.extFiringTableEndIdxD1 != NULL) CUDA_CHECK_ERRORS(cudaFreeHost(managerRuntimeData.extFiringTableEndIdxD1));
	//if (managerRuntimeData.extFiringTableEndIdxD2 != NULL) CUDA_CHECK_ERRORS(cudaFreeHost(managerRuntimeData.extFiringTableEndIdxD2));
	managerRuntimeData.extFiringTableEndIdxD1 = NULL; managerRuntimeData.extFiringTableEndIdxD2 = NULL;
}
5997 
6001 void SNN::resetPoissonNeuron(int netId, int lGrpId, int lNId) {
6002  assert(lNId < networkConfigs[netId].numN);
6003  managerRuntimeData.lastSpikeTime[lNId] = MAX_SIMULATION_TIME;
6004  if (groupConfigs[netId][lGrpId].WithHomeostasis)
6005  managerRuntimeData.avgFiring[lNId] = 0.0f;
6006 
6007  if (groupConfigs[netId][lGrpId].WithSTP) {
6008  for (int j = 0; j < networkConfigs[netId].maxDelay + 1; j++) { // is of size maxDelay_+1
6009  int index = STP_BUF_POS(lNId, j, networkConfigs[netId].maxDelay);
6010  managerRuntimeData.stpu[index] = 0.0f;
6011  managerRuntimeData.stpx[index] = 1.0f;
6012  }
6013  }
6014 }
6015 
void SNN::resetPropogationBuffer() {
	// Clear all pending (scheduled but not yet delivered) spikes from the
	// spike buffer.
	// FIXME: why 1023? presumably related to the maximum supported axonal
	// delay window — confirm against SpikeBuffer::reset()'s contract.
	spikeBuf->reset(0, 1023);
}
6020 
6021 //Reset wt, wtChange, pre-firing time values to default values, rewritten to
6022 //integrate changes between JMN and MDR -- KDC
6023 //if changeWeights is false, we should keep the values of the weights as they currently
6024 //are but we should be able to change them to plastic or fixed synapses. -- KDC
6025 // FIXME: imlement option of resetting weights
6026 void SNN::resetSynapse(int netId, bool changeWeights) {
6027  memset(managerRuntimeData.wtChange, 0, sizeof(float) * networkConfigs[netId].numPreSynNet); // reset the synaptic derivatives
6028 
6029  for (int syn = 0; syn < networkConfigs[netId].numPreSynNet; syn++)
6030  managerRuntimeData.synSpikeTime[syn] = MAX_SIMULATION_TIME; // reset the spike time of each syanpse
6031 }
6032 
6033 void SNN::resetTimeTable() {
6034  memset(managerRuntimeData.timeTableD2, 0, sizeof(int) * (1000 + glbNetworkConfig.maxDelay + 1));
6035  memset(managerRuntimeData.timeTableD1, 0, sizeof(int) * (1000 + glbNetworkConfig.maxDelay + 1));
6036 }
6037 
6038 void SNN::resetFiringTable() {
6039  memset(managerRuntimeData.firingTableD2, 0, sizeof(int) * managerRTDSize.maxMaxSpikeD2);
6040  memset(managerRuntimeData.firingTableD1, 0, sizeof(int) * managerRTDSize.maxMaxSpikeD1);
6041  memset(managerRuntimeData.extFiringTableEndIdxD2, 0, sizeof(int) * managerRTDSize.maxNumGroups);
6042  memset(managerRuntimeData.extFiringTableEndIdxD1, 0, sizeof(int) * managerRTDSize.maxNumGroups);
6043  memset(managerRuntimeData.extFiringTableD2, 0, sizeof(int*) * managerRTDSize.maxNumGroups);
6044  memset(managerRuntimeData.extFiringTableD1, 0, sizeof(int*) * managerRTDSize.maxNumGroups);
6045 }
6046 
void SNN::resetSpikeCnt(int gGrpId) {
	// Reset the spike counters of one global group, or of every group when
	// gGrpId == ALL. In the ALL case, each CPU runtime is reset in its own
	// pthread (pinned round-robin to cores) on Linux/Mac; on Windows/Apple
	// the CPU path runs sequentially in the calling thread. GPU runtimes are
	// always reset via resetSpikeCnt_GPU.
	assert(gGrpId >= ALL);

	if (gGrpId == ALL) {
		#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
		pthread_t threads[numCores + 1]; // 1 additional array size if numCores == 0, it may work though bad practice
		cpu_set_t cpus;
		ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
		int threadCount = 0;
		#endif

		for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
			if (!groupPartitionLists[netId].empty()) {
				if (netId < CPU_RUNTIME_BASE) // GPU runtime
					resetSpikeCnt_GPU(netId, ALL);
				else{ // CPU runtime
					#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
					resetSpikeCnt_CPU(netId, ALL);
					#else // Linux or MAC
					// pin the worker thread to a core, round-robin over NUM_CPU_CORES
					pthread_attr_t attr;
					pthread_attr_init(&attr);
					CPU_ZERO(&cpus);
					CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
					pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

					// thread argument: reset all groups of this network; index
					// fields are unused for the ALL case and left at 0
					argsThreadRoutine[threadCount].snn_pointer = this;
					argsThreadRoutine[threadCount].netId = netId;
					argsThreadRoutine[threadCount].lGrpId = ALL;
					argsThreadRoutine[threadCount].startIdx = 0;
					argsThreadRoutine[threadCount].endIdx = 0;
					argsThreadRoutine[threadCount].GtoLOffset = 0;

					pthread_create(&threads[threadCount], &attr, &SNN::helperResetSpikeCnt_CPU, (void*)&argsThreadRoutine[threadCount]);
					pthread_attr_destroy(&attr);
					threadCount++;
					#endif
				}
			}
		}

		#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
		// join all the threads
		for (int i=0; i<threadCount; i++){
			pthread_join(threads[i], NULL);
		}
		#endif
	}
	else {
		// single group: dispatch to the runtime that hosts it
		int netId = groupConfigMDMap[gGrpId].netId;
		int lGrpId = groupConfigMDMap[gGrpId].lGrpId;

		if (netId < CPU_RUNTIME_BASE) // GPU runtime
			resetSpikeCnt_GPU(netId, lGrpId);
		else // CPU runtime
			resetSpikeCnt_CPU(netId, lGrpId);
	}
}
6104 
6105 
6107 inline SynInfo SNN::SET_CONN_ID(int nId, int sId, int grpId) {
6108  if (grpId > GROUP_ID_MASK) {
6109  KERNEL_ERROR("Error: Group Id (%d) exceeds maximum limit (%d)", grpId, GROUP_ID_MASK);
6111  }
6112 
6113  SynInfo synInfo;
6114  //p.postId = (((sid)<<CONN_SYN_NEURON_BITS)+((nid)&CONN_SYN_NEURON_MASK));
6115  //p.grpId = grpId;
6116  synInfo.gsId = ((grpId << NUM_SYNAPSE_BITS) | sId);
6117  synInfo.nId = nId;
6118 
6119  return synInfo;
6120 }
6121 
6122 
6123 void SNN::setGrpTimeSlice(int gGrpId, int timeSlice) {
6124  if (gGrpId == ALL) {
6125  for(int grpId = 0; grpId < numGroups; grpId++) {
6126  if (groupConfigMap[grpId].isSpikeGenerator)
6127  setGrpTimeSlice(grpId, timeSlice);
6128  }
6129  } else {
6130  assert((timeSlice > 0 ) && (timeSlice <= MAX_TIME_SLICE));
6131  // the group should be poisson spike generator group
6132  groupConfigMDMap[gGrpId].currTimeSlice = timeSlice;
6133  }
6134 }
6135 
6136 // method to set const member randSeed_
6137 int SNN::setRandSeed(int seed) {
6138  if (seed<0)
6139  return time(NULL);
6140  else if(seed==0)
6141  return 123;
6142  else
6143  return seed;
6144 }
6145 
void SNN::fillSpikeGenBits(int netId) {
	// Convert the spikes queued in spikeBuf into the spikeGenBits bitmask of
	// the given local network: one bit per spike-generator neuron slot.
	SpikeBuffer::SpikeIterator spikeBufIter;
	SpikeBuffer::SpikeIterator spikeBufIterEnd = spikeBuf->back();

	// Convert spikes stored in spikeBuffer to SpikeGenBit
	for (spikeBufIter = spikeBuf->front(); spikeBufIter != spikeBufIterEnd; ++spikeBufIter) {
		// get the global neuron id and group id for this particular spike
		int gGrpId = spikeBufIter->grpId;

		// only spikes of groups hosted on this network are packed here
		if (groupConfigMDMap[gGrpId].netId == netId) {
			int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
			int lNId = spikeBufIter->neurId /* gNId */ + groupConfigMDMap[gGrpId].GtoLOffset;

			// add spike to spikeGentBit
			assert(groupConfigMap[gGrpId].isSpikeGenerator == true);

			// position of this neuron among the network's spike-generator slots
			int nIdPos = (lNId - groupConfigs[netId][lGrpId].lStartN + groupConfigs[netId][lGrpId].Noffset);
			int nIdBitPos = nIdPos % 32; // bit within the 32-bit word
			int nIdIndex = nIdPos / 32; // which 32-bit word

			assert(nIdIndex < (networkConfigs[netId].numNSpikeGen / 32 + 1));

			managerRuntimeData.spikeGenBits[nIdIndex] |= (1 << nIdBitPos);
		}
	}
}
6172 
6173 void SNN::startTiming() { prevExecutionTime = cumExecutionTime; }
6174 void SNN::stopTiming() {
6175  executionTime += (cumExecutionTime - prevExecutionTime);
6176  prevExecutionTime = cumExecutionTime;
6177 }
6178 
6179 // enters testing phase
6180 // in testing, no weight changes can be made, allowing you to evaluate learned weights, etc.
6181 void SNN::startTesting(bool shallUpdateWeights) {
6182  // because this can be called at any point in time, if we're off the 1-second grid, we want to make
6183  // sure to apply the accumulated weight changes to the weight matrix
6184  // but we don't reset the wt update interval counter
6185  if (shallUpdateWeights && !sim_in_testing) {
6186  // careful: need to temporarily adjust stdpScaleFactor to make this right
6187  if (wtANDwtChangeUpdateIntervalCnt_) {
6188  float storeScaleSTDP = stdpScaleFactor_;
6189  stdpScaleFactor_ = 1.0f/wtANDwtChangeUpdateIntervalCnt_;
6190 
6191  updateWeights();
6192 
6193  stdpScaleFactor_ = storeScaleSTDP;
6194  }
6195  }
6196 
6197  sim_in_testing = true;
6198 
6199  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
6200  if (!groupPartitionLists[netId].empty()) {
6201  networkConfigs[netId].sim_in_testing = true;
6202  updateNetworkConfig(netId); // update networkConfigRT struct (|TODO copy only a single boolean)
6203  }
6204  }
6205 }
6206 
6207 // exits testing phase
6209  sim_in_testing = false;
6210 
6211  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
6212  if (!groupPartitionLists[netId].empty()) {
6213  networkConfigs[netId].sim_in_testing = false;
6214  updateNetworkConfig(netId); // update networkConfigRT struct (|TODO copy only a single boolean)
6215  }
6216  }
6217 }
6218 
6219 void SNN::updateConnectionMonitor(short int connId) {
6220  for (int monId=0; monId<numConnectionMonitor; monId++) {
6221  if (connId==ALL || connMonCoreList[monId]->getConnectId()==connId) {
6222  int timeInterval = connMonCoreList[monId]->getUpdateTimeIntervalSec();
6223  if (timeInterval==1 || timeInterval>1 && (getSimTime()%timeInterval)==0) {
6224  // this ConnectionMonitor wants periodic recording
6225  connMonCoreList[monId]->writeConnectFileSnapshot(simTime,
6226  getWeightMatrix2D(connMonCoreList[monId]->getConnectId()));
6227  }
6228  }
6229  }
6230 }
6231 
// FIXME: modify this for multi-GPUs
std::vector< std::vector<float> > SNN::getWeightMatrix2D(short int connId) {
	// Build a dense (numPre x numPost) weight matrix for one connection.
	// Entries that do not belong to this connection stay NAN. Note that
	// fabs() strips the stored sign of the weight.
	assert(connId > ALL); // ALL == -1
	std::vector< std::vector<float> > wtConnId;

	int grpIdPre = connectConfigMap[connId].grpSrc;
	int grpIdPost = connectConfigMap[connId].grpDest;

	// the weights live on the runtime hosting the post-synaptic group
	int netIdPost = groupConfigMDMap[grpIdPost].netId;
	int lGrpIdPost = groupConfigMDMap[grpIdPost].lGrpId;

	// init weight matrix with right dimensions, NAN-filled
	for (int i = 0; i < groupConfigMap[grpIdPre].numN; i++) {
		std::vector<float> wtSlice;
		for (int j = 0; j < groupConfigMap[grpIdPost].numN; j++) {
			wtSlice.push_back(NAN);
		}
		wtConnId.push_back(wtSlice);
	}

	// copy the weights for a given post-group from device
	// \TODO: check if the weights for this grpIdPost have already been copied
	// \TODO: even better, but tricky because of ordering, make copyWeightState connection-based

	assert(grpIdPost > ALL); // ALL == -1

	// Note, copyWeightState() also copies pre-connections information (e.g., Npre, Npre_plastic, cumulativePre, and preSynapticIds)
	fetchWeightState(netIdPost, lGrpIdPost);
	fetchConnIdsLookupArray(netIdPost);

	// walk the pre-synapse list of every post-neuron in the post group
	for (int lNIdPost = groupConfigs[netIdPost][lGrpIdPost].lStartN; lNIdPost <= groupConfigs[netIdPost][lGrpIdPost].lEndN; lNIdPost++) {
		unsigned int pos_ij = managerRuntimeData.cumulativePre[lNIdPost];
		for (int i = 0; i < managerRuntimeData.Npre[lNIdPost]; i++, pos_ij++) {
			// skip synapses that belong to a different connection ID
			if (managerRuntimeData.connIdsPreIdx[pos_ij] != connId) //connInfo->connId)
				continue;

			// find pre-neuron ID and update ConnectionMonitor container
			int lNIdPre = GET_CONN_NEURON_ID(managerRuntimeData.preSynapticIds[pos_ij]);
			int lGrpIdPre = GET_CONN_GRP_ID(managerRuntimeData.preSynapticIds[pos_ij]);
			// index by offsets within the pre/post groups
			wtConnId[lNIdPre - groupConfigs[netIdPost][lGrpIdPre].lStartN][lNIdPost - groupConfigs[netIdPost][lGrpIdPost].lStartN] =
				fabs(managerRuntimeData.wt[pos_ij]);
		}
	}

	return wtConnId;
}
6279 
// Transfers buffered group state (currently dopamine concentration only) from the
// runtime back-end into the GroupMonitor container and/or its status file.
// \param gGrpId global group ID, or ALL to update every group that has a monitor
// Must be called at least once per simulated second: the back-end buffer only holds
// 1000 ms worth of data, so anything older is lost.
void SNN::updateGroupMonitor(int gGrpId) {
	// don't continue if no group monitors in the network
	if (!numGroupMonitor)
		return;

	if (gGrpId == ALL) {
		// update all groups; those without a monitor bail out below
		for (int gGrpId = 0; gGrpId < numGroups; gGrpId++)
			updateGroupMonitor(gGrpId);
	} else {
		int netId = groupConfigMDMap[gGrpId].netId;
		int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
		// update group monitor of a specific group
		// find index in group monitor arrays
		int monitorId = groupConfigMDMap[gGrpId].groupMonitorId;

		// don't continue if no group monitor enabled for this group
		if (monitorId < 0) return;

		// find last update time for this group
		GroupMonitorCore* grpMonObj = groupMonCoreList[monitorId];
		int lastUpdate = grpMonObj->getLastUpdated();

		// don't continue if time interval is zero (nothing to update)
		if (getSimTime() - lastUpdate <= 0)
			return;

		// more than 1000 ms elapsed: the ring buffer has already wrapped, data was lost
		if (getSimTime() - lastUpdate > 1000)
			KERNEL_ERROR("updateGroupMonitor(grpId=%d) must be called at least once every second", gGrpId);

		// copy the group status (neuromodulators) to the manager runtime
		fetchGroupState(netId, lGrpId);

		// find the time interval in which to update group status
		// usually, we call updateGroupMonitor once every second, so the time interval is [0,1000)
		// however, updateGroupMonitor can be called at any time t \in [0,1000)... so we can have the cases
		// [0,t), [t,1000), and even [t1, t2)
		int numMsMin = lastUpdate % 1000; // lower bound is given by last time we called update
		int numMsMax = getSimTimeMs(); // upper bound is given by current time
		if (numMsMax == 0)
			numMsMax = 1000; // special case: full second
		assert(numMsMin < numMsMax);

		// current time is last completed second in milliseconds (plus t to be added below)
		// special case is after each completed second where !getSimTimeMs(): here we look 1s back
		int currentTimeSec = getSimTimeSec();
		if (!getSimTimeMs())
			currentTimeSec--;

		// save current time as last update time
		grpMonObj->setLastUpdated(getSimTime());

		// prepare fast access
		FILE* grpFileId = groupMonCoreList[monitorId]->getGroupFileId();
		bool writeGroupToFile = grpFileId != NULL;
		bool writeGroupToArray = grpMonObj->isRecording();
		float data;

		// Read one piece of data at a time from the buffer and put the data into the appropriate
		// monitor buffer. Later the user may need to dump these group status data to an output file.
		for(int t = numMsMin; t < numMsMax; t++) {
			// fetch group status data; only dopamine concentration is supported currently
			// (buffer layout: 1000 ms per local group, indexed [lGrpId][t])
			data = managerRuntimeData.grpDABuffer[lGrpId * 1000 + t];

			// current time is last completed second plus whatever is leftover in t
			int time = currentTimeSec * 1000 + t;

			if (writeGroupToFile) {
				// TODO: write to group status file
			}

			if (writeGroupToArray) {
				grpMonObj->pushData(time, data);
			}
		}

		if (grpFileId!=NULL) // flush group status file
			fflush(grpFileId);
	}
}
6359 
// FIXME: wrong to use groupConfigs[0]
// Schedules spikes for one spike-generator group by repeatedly polling the
// user-supplied SpikeGenerator callback (nextSpikeTime). For each neuron, spikes
// are scheduled into spikeBuf for the window [currTime, min(currTime+timeSlice,
// simTimeRunStop)); polling stops once the callback returns a time outside it.
void SNN::userDefinedSpikeGenerator(int gGrpId) {
	// \FIXME this function is a mess
	SpikeGeneratorCore* spikeGenFunc = groupConfigMap[gGrpId].spikeGenFunc;
	int netId = groupConfigMDMap[gGrpId].netId;
	int timeSlice = groupConfigMDMap[gGrpId].currTimeSlice;
	int currTime = simTime;
	bool done;

	// bring each neuron's last spike time into the manager runtime, so the
	// callback can honor refractory periods
	fetchLastSpikeTime(netId);

	for(int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
		// start the time from the last time it spiked, that way we can ensure that the refractory period is maintained
		int lNId = gNId + groupConfigMDMap[gGrpId].GtoLOffset;
		int nextTime = managerRuntimeData.lastSpikeTime[lNId];
		if (nextTime == MAX_SIMULATION_TIME) // sentinel: neuron has never spiked
			nextTime = 0;

		// the end of the valid time window is either the length of the scheduling time slice from now (because that
		// is the max of the allowed propagated buffer size) or simply the end of the simulation
		int endOfTimeWindow = std::min(currTime+timeSlice, simTimeRunStop);

		done = false;
		while (!done) {
			// generate the next spike time (nextSchedTime) from the nextSpikeTime callback
			int nextSchedTime = spikeGenFunc->nextSpikeTime(this, gGrpId, gNId - groupConfigMDMap[gGrpId].gStartN, currTime, nextTime, endOfTimeWindow);

			// the generated spike time is valid only if:
			// - it has not been scheduled before (nextSchedTime > nextTime)
			// - but careful: we would drop spikes at t=0, because we cannot initialize nextTime to -1...
			// - it is within the scheduling time slice (nextSchedTime < endOfTimeWindow)
			// - it is not in the past (nextSchedTime >= currTime)
			if ((nextSchedTime==0 || nextSchedTime>nextTime) && nextSchedTime<endOfTimeWindow && nextSchedTime>=currTime) {
//				fprintf(stderr,"%u: spike scheduled for %d at %u\n",currTime, i-groupConfigs[0][grpId].StartN,nextSchedTime);
				// scheduled spike...
				// \TODO CPU mode does not check whether the same AER event has been scheduled before (bug #212)
				// check how GPU mode does it, then do the same here.
				nextTime = nextSchedTime;
				spikeBuf->schedule(gNId, gGrpId, nextTime - currTime);
			} else {
				// out-of-window or already-scheduled time: stop polling this neuron
				done = true;
			}
		}
	}
}
6405 
6406 void SNN::generateUserDefinedSpikes() {
6407  for(int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
6408  if (groupConfigMap[gGrpId].isSpikeGenerator) {
6409  // This evaluation is done to check if its time to get new set of spikes..
6410  // check whether simTime has advance more than the current time slice, in which case we need to schedule
6411  // spikes for the next time slice
6412  // we always have to run this the first millisecond of a new runNetwork call; that is,
6413  // when simTime==simTimeRunStart
6414  if(((simTime - groupConfigMDMap[gGrpId].sliceUpdateTime) >= groupConfigMDMap[gGrpId].currTimeSlice || simTime == simTimeRunStart)) {
6415  int timeSlice = groupConfigMDMap[gGrpId].currTimeSlice;
6416  groupConfigMDMap[gGrpId].sliceUpdateTime = simTime;
6417 
6418  // we dont generate any poisson spike if during the
6419  // current call we might exceed the maximum 32 bit integer value
6420  if ((simTime + timeSlice) == MAX_SIMULATION_TIME || (simTime + timeSlice) < 0)
6421  return;
6422 
6423  if (groupConfigMap[gGrpId].spikeGenFunc != NULL) {
6424  userDefinedSpikeGenerator(gGrpId);
6425  }
6426  }
6427  }
6428  }
6429 }
6430 
6436 void SNN::allocateManagerSpikeTables() {
6437  managerRuntimeData.firingTableD2 = new int[managerRTDSize.maxMaxSpikeD2];
6438  managerRuntimeData.firingTableD1 = new int[managerRTDSize.maxMaxSpikeD1];
6439  managerRuntimeData.extFiringTableEndIdxD2 = new int[managerRTDSize.maxNumGroups];
6440  managerRuntimeData.extFiringTableEndIdxD1 = new int[managerRTDSize.maxNumGroups];
6441  managerRuntimeData.extFiringTableD2 = new int*[managerRTDSize.maxNumGroups];
6442  managerRuntimeData.extFiringTableD1 = new int*[managerRTDSize.maxNumGroups];
6443 
6444  //CUDA_CHECK_ERRORS(cudaMallocHost(&managerRuntimeData.firingTableD2, sizeof(int) * managerRTDSize.maxMaxSpikeD2));
6445  //CUDA_CHECK_ERRORS(cudaMallocHost(&managerRuntimeData.firingTableD1, sizeof(int) * managerRTDSize.maxMaxSpikeD1));
6446  //CUDA_CHECK_ERRORS(cudaMallocHost(&managerRuntimeData.extFiringTableEndIdxD2, sizeof(int) * managerRTDSize.maxNumGroups));
6447  //CUDA_CHECK_ERRORS(cudaMallocHost(&managerRuntimeData.extFiringTableEndIdxD1, sizeof(int) * managerRTDSize.maxNumGroups));
6448  //CUDA_CHECK_ERRORS(cudaMallocHost(&managerRuntimeData.extFiringTableD2, sizeof(int*) * managerRTDSize.maxNumGroups));
6449  //CUDA_CHECK_ERRORS(cudaMallocHost(&managerRuntimeData.extFiringTableD1, sizeof(int*) * managerRTDSize.maxNumGroups));
6450  resetFiringTable();
6451 
6452  managerRuntimeData.timeTableD2 = new unsigned int[TIMING_COUNT];
6453  managerRuntimeData.timeTableD1 = new unsigned int[TIMING_COUNT];
6454  resetTimeTable();
6455 }
6456 
6457 // updates simTime, returns true when new second started
6458 bool SNN::updateTime() {
6459  bool finishedOneSec = false;
6460 
6461  // done one second worth of simulation
6462  // update relevant parameters...now
6463  if(++simTimeMs == 1000) {
6464  simTimeMs = 0;
6465  simTimeSec++;
6466  finishedOneSec = true;
6467  }
6468 
6469  simTime++;
6470  if(simTime == MAX_SIMULATION_TIME || simTime < 0){
6471  // reached the maximum limit of the simulation time using 32 bit value...
6472  KERNEL_WARN("Maximum Simulation Time Reached...Resetting simulation time");
6473  }
6474 
6475  return finishedOneSec;
6476 }
6477 
// FIXME: modify this for multi-GPUs
// Transfers buffered spikes (AER events) from the runtime back-end into the
// SpikeMonitor container and/or its binary spike file.
// \param gGrpId global group ID, or ALL to update every group that has a monitor
// Must be called at least once per simulated second: the firing tables only hold
// 1000 ms of data, so older spikes are lost.
void SNN::updateSpikeMonitor(int gGrpId) {
	// don't continue if no spike monitors in the network
	if (!numSpikeMonitor)
		return;

	if (gGrpId == ALL) {
		// update all groups; those without a monitor bail out below
		for (int gGrpId = 0; gGrpId < numGroups; gGrpId++)
			updateSpikeMonitor(gGrpId);
	} else {
		int netId = groupConfigMDMap[gGrpId].netId;
		int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
		// update spike monitor of a specific group
		// find index in spike monitor arrays
		int monitorId = groupConfigMDMap[gGrpId].spikeMonitorId;

		// don't continue if no spike monitor enabled for this group
		if (monitorId < 0) return;

		// find last update time for this group
		SpikeMonitorCore* spkMonObj = spikeMonCoreList[monitorId];
		long int lastUpdate = spkMonObj->getLastUpdated();

		// don't continue if time interval is zero (nothing to update)
		if ( ((long int)getSimTime()) - lastUpdate <= 0)
			return;

		// more than 1000 ms elapsed: the firing tables have already wrapped, data was lost
		if ( ((long int)getSimTime()) - lastUpdate > 1000)
			KERNEL_ERROR("updateSpikeMonitor(grpId=%d) must be called at least once every second",gGrpId);

		// AER buffer max size warning here.
		// Because of C++ short-circuit evaluation, the last condition should not be evaluated
		// if the previous conditions are false.
		if (spkMonObj->getAccumTime() > LONG_SPIKE_MON_DURATION \
			&& this->getGroupNumNeurons(gGrpId) > LARGE_SPIKE_MON_GRP_SIZE \
			&& spkMonObj->isBufferBig()){
			// change this warning message to correct message
			KERNEL_WARN("updateSpikeMonitor(grpId=%d) is becoming very large. (>%lu MB)",gGrpId,(long int) MAX_SPIKE_MON_BUFFER_SIZE/1024 );// make this better
			KERNEL_WARN("Reduce the cumulative recording time (currently %lu minutes) or the group size (currently %d) to avoid this.",spkMonObj->getAccumTime()/(1000*60),this->getGroupNumNeurons(gGrpId));
		}

		// copy the neuron firing information to the manager runtime
		fetchSpikeTables(netId);
		fetchGrpIdsLookupArray(netId);

		// find the time interval in which to update spikes
		// usually, we call updateSpikeMonitor once every second, so the time interval is [0,1000)
		// however, updateSpikeMonitor can be called at any time t \in [0,1000)... so we can have the cases
		// [0,t), [t,1000), and even [t1, t2)
		int numMsMin = lastUpdate % 1000; // lower bound is given by last time we called update
		int numMsMax = getSimTimeMs(); // upper bound is given by current time
		if (numMsMax == 0)
			numMsMax = 1000; // special case: full second
		assert(numMsMin < numMsMax);

		// current time is last completed second in milliseconds (plus t to be added below)
		// special case is after each completed second where !getSimTimeMs(): here we look 1s back
		int currentTimeSec = getSimTimeSec();
		if (!getSimTimeMs())
			currentTimeSec--;

		// save current time as last update time
		spkMonObj->setLastUpdated( (long int)getSimTime() );

		// prepare fast access
		FILE* spkFileId = spikeMonCoreList[monitorId]->getSpikeFileId();
		bool writeSpikesToFile = spkFileId != NULL;
		bool writeSpikesToArray = spkMonObj->getMode()==AER && spkMonObj->isRecording();

		// Read one spike at a time from the buffer and put the spikes into the appropriate
		// monitor buffer. Later the user may need to dump these spikes to an output file.
		// k==0 walks the 2+ms-delay firing table, k==1 the 1ms-delay table.
		for (int k = 0; k < 2; k++) {
			unsigned int* timeTablePtr = (k == 0) ? managerRuntimeData.timeTableD2 : managerRuntimeData.timeTableD1;
			int* fireTablePtr = (k == 0) ? managerRuntimeData.firingTableD2 : managerRuntimeData.firingTableD1;
			for(int t = numMsMin; t < numMsMax; t++) {
				// timeTablePtr[t+maxDelay .. t+maxDelay+1) bounds this millisecond's
				// slice of the firing table
				for(int i = timeTablePtr[t + glbNetworkConfig.maxDelay]; i < timeTablePtr[t + glbNetworkConfig.maxDelay + 1]; i++) {
					// retrieve the neuron id
					int lNId = fireTablePtr[i];

					// make sure neuron belongs to currently relevant group
					int this_grpId = managerRuntimeData.grpIds[lNId];
					if (this_grpId != lGrpId)
						continue;

					// adjust nid to be 0-indexed for each group
					// this way, if a group has 10 neurons, their IDs in the spike file and spike monitor will be
					// indexed from 0..9, no matter what their real nid is
					int nId = lNId - groupConfigs[netId][lGrpId].lStartN;
					assert(nId >= 0);

					// current time is last completed second plus whatever is leftover in t
					int time = currentTimeSec * 1000 + t;

					if (writeSpikesToFile) {
						// binary AER record: (time, neuron id), both 32-bit ints
						int cnt;
						cnt = fwrite(&time, sizeof(int), 1, spkFileId); assert(cnt==1);
						cnt = fwrite(&nId, sizeof(int), 1, spkFileId); assert(cnt==1);
					}

					if (writeSpikesToArray) {
						spkMonObj->pushAER(time, nId);
					}
				}
			}
		}

		if (spkFileId!=NULL) // flush spike file
			fflush(spkFileId);
	}
}
6588 
// FIXME: modify this for multi-GPUs
// Transfers buffered per-neuron state (v, u, I) from the runtime back-end into the
// NeuronMonitor container and/or its binary file.
// \param gGrpId global group ID, or ALL to update every group that has a monitor
// Must be called at least once per simulated second: the state buffers only hold
// 1000 ms of data, so older samples are lost.
void SNN::updateNeuronMonitor(int gGrpId) {
	// don't continue if no neuron monitors in the network
	if (!numNeuronMonitor)
		return;

	//printf("The global group id is: %i\n", gGrpId);

	if (gGrpId == ALL) {
		// update all groups; those without a monitor bail out below
		for (int gGrpId = 0; gGrpId < numGroups; gGrpId++)
			updateNeuronMonitor(gGrpId);
	}
	else {
		//printf("UpdateNeuronMonitor is being executed!\n");
		int netId = groupConfigMDMap[gGrpId].netId;
		int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
		// update neuron monitor of a specific group
		// find index in neuron monitor arrays
		int monitorId = groupConfigMDMap[gGrpId].neuronMonitorId;

		// don't continue if no neuron monitor enabled for this group
		if (monitorId < 0) return;

		// find last update time for this group
		NeuronMonitorCore* nrnMonObj = neuronMonCoreList[monitorId];
		long int lastUpdate = nrnMonObj->getLastUpdated();

		// don't continue if time interval is zero (nothing to update)
		if (((long int)getSimTime()) - lastUpdate <= 0)
			return;

		// more than 1000 ms elapsed: the state buffers have already wrapped, data was lost
		if (((long int)getSimTime()) - lastUpdate > 1000)
			KERNEL_ERROR("updateNeuronMonitor(grpId=%d) must be called at least once every second", gGrpId);

		// AER buffer max size warning here.
		// Because of C++ short-circuit evaluation, the last condition should not be evaluated
		// if the previous conditions are false.

		/*if (nrnMonObj->getAccumTime() > LONG_NEURON_MON_DURATION \
		&& this->getGroupNumNeurons(gGrpId) > LARGE_NEURON_MON_GRP_SIZE \
		&& nrnMonObj->isBufferBig()) {
		// change this warning message to correct message
		KERNEL_WARN("updateNeuronMonitor(grpId=%d) is becoming very large. (>%lu MB)", gGrpId, (long int)MAX_NEURON_MON_BUFFER_SIZE / 1024);// make this better
		KERNEL_WARN("Reduce the cumulative recording time (currently %lu minutes) or the group size (currently %d) to avoid this.", nrnMonObj->getAccumTime() / (1000 * 60), this->getGroupNumNeurons(gGrpId));
		}*/

		// copy the neuron information to manager runtime
		fetchNeuronStateBuffer(netId, lGrpId);

		// find the time interval in which to update neuron state info
		// usually, we call updateNeuronMonitor once every second, so the time interval is [0,1000)
		// however, updateNeuronMonitor can be called at any time t \in [0,1000)... so we can have the cases
		// [0,t), [t,1000), and even [t1, t2)
		int numMsMin = lastUpdate % 1000; // lower bound is given by last time we called update
		int numMsMax = getSimTimeMs(); // upper bound is given by current time
		if (numMsMax == 0)
			numMsMax = 1000; // special case: full second
		assert(numMsMin < numMsMax);
		//KERNEL_INFO("lastUpdate: %d -- numMsMin: %d -- numMsMax: %d", lastUpdate, numMsMin, numMsMax);

		// current time is last completed second in milliseconds (plus t to be added below)
		// special case is after each completed second where !getSimTimeMs(): here we look 1s back
		int currentTimeSec = getSimTimeSec();
		if (!getSimTimeMs())
			currentTimeSec--;

		// save current time as last update time
		nrnMonObj->setLastUpdated((long int)getSimTime());

		// prepare fast access
		FILE* nrnFileId = neuronMonCoreList[monitorId]->getNeuronFileId();
		bool writeNeuronStateToFile = nrnFileId != NULL;
		bool writeNeuronStateToArray = nrnMonObj->isRecording();

		// Read one neuron state value at a time from the buffer and put the neuron state values
		// into the appropriate monitor buffer. Later the user may need to dump these neuron
		// state values to an output file.
		//printf("The numMsMin is: %i; and numMsMax is: %i\n", numMsMin, numMsMax);
		for (int t = numMsMin; t < numMsMax; t++) {
			//printf("The lStartN is: %i; and lEndN is: %i\n", groupConfigs[netId][lGrpId].lStartN, groupConfigs[netId][lGrpId].lEndN);
			for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++) {
				float v, u, I;

				// make sure neuron belongs to currently relevant group
				int this_grpId = managerRuntimeData.grpIds[lNId];
				if (this_grpId != lGrpId)
					continue;

				// adjust nid to be 0-indexed for each group
				// this way, if a group has 10 neurons, their IDs in the spike file and spike monitor will be
				// indexed from 0..9, no matter what their real nid is
				int nId = lNId - groupConfigs[netId][lGrpId].lStartN;
				assert(nId >= 0);

				// flat [t][group][neuron] layout; MAX_NEURON_MON_GRP_SZIE spelling
				// ("SZIE") is the macro's actual name in the headers
				int idxBase = networkConfigs[netId].numGroups * MAX_NEURON_MON_GRP_SZIE * t + lGrpId * MAX_NEURON_MON_GRP_SZIE;
				v = managerRuntimeData.nVBuffer[idxBase + nId];
				u = managerRuntimeData.nUBuffer[idxBase + nId];
				I = managerRuntimeData.nIBuffer[idxBase + nId];

				//printf("Voltage recorded is: %f\n", v);

				// current time is last completed second plus whatever is leftover in t
				int time = currentTimeSec * 1000 + t;

				//KERNEL_INFO("t: %d -- time: %d --base: %d -- nId: %d -- v: %f -- u: %f, --I: %f", t, time, idxBase + nId, nId, v, u, I);

				// WRITE TO A TEXT FILE INSTEAD OF BINARY
				if (writeNeuronStateToFile) {
					//KERNEL_INFO("Save to file");
					// binary record: (time, neuron id) as ints, then (v, u, I) as floats
					int cnt;
					cnt = fwrite(&time, sizeof(int), 1, nrnFileId); assert(cnt == 1);
					cnt = fwrite(&nId, sizeof(int), 1, nrnFileId); assert(cnt == 1);
					cnt = fwrite(&v, sizeof(float), 1, nrnFileId); assert(cnt == 1);
					cnt = fwrite(&u, sizeof(float), 1, nrnFileId); assert(cnt == 1);
					cnt = fwrite(&I, sizeof(float), 1, nrnFileId); assert(cnt == 1);
				}

				if (writeNeuronStateToArray) {
					//KERNEL_INFO("Save to array");
					nrnMonObj->pushNeuronState(nId, v, u, I);
				}
			}
		}

		if (nrnFileId != NULL) // flush neuron state file
			fflush(nrnFileId);
	}
}
6716 
6717 // FIXME: update summary format for multiGPUs
6718 void SNN::printSimSummary() {
6719  float etime;
6720 
6721  // FIXME: measure total execution time, and GPU excution time
6722  stopTiming();
6723  etime = executionTime;
6724 
6725  fetchNetworkSpikeCount();
6726 
6727  KERNEL_INFO("\n");
6728  KERNEL_INFO("******************** Simulation Summary ***************************");
6729 
6730  KERNEL_INFO("Network Parameters: \tnumNeurons = %d (numNExcReg:numNInhReg = %2.1f:%2.1f)",
6731  glbNetworkConfig.numN, 100.0 * glbNetworkConfig.numNExcReg / glbNetworkConfig.numN, 100.0 * glbNetworkConfig.numNInhReg / glbNetworkConfig.numN);
6732  KERNEL_INFO("\t\t\tnumSynapses = %d", glbNetworkConfig.numSynNet);
6733  KERNEL_INFO("\t\t\tmaxDelay = %d", glbNetworkConfig.maxDelay);
6734  KERNEL_INFO("Simulation Mode:\t%s",sim_with_conductances?"COBA":"CUBA");
6735  KERNEL_INFO("Random Seed:\t\t%d", randSeed_);
6736  KERNEL_INFO("Timing:\t\t\tModel Simulation Time = %lld sec", (unsigned long long)simTimeSec);
6737  KERNEL_INFO("\t\t\tActual Execution Time = %4.2f sec", etime/1000.0f);
6738  KERNEL_INFO("Average Firing Rate:\t2+ms delay = %3.3f Hz",
6739  glbNetworkConfig.numN2msDelay > 0 ? managerRuntimeData.spikeCountD2 / (1.0 * simTimeSec * glbNetworkConfig.numN2msDelay) : 0.0f);
6740  KERNEL_INFO("\t\t\t1ms delay = %3.3f Hz",
6741  glbNetworkConfig.numN1msDelay > 0 ? managerRuntimeData.spikeCountD1 / (1.0 * simTimeSec * glbNetworkConfig.numN1msDelay) : 0.0f);
6742  KERNEL_INFO("\t\t\tOverall = %3.3f Hz", managerRuntimeData.spikeCount / (1.0 * simTimeSec * glbNetworkConfig.numN));
6743  KERNEL_INFO("Overall Spike Count Transferred:");
6744  KERNEL_INFO("\t\t\t2+ms delay = %d", managerRuntimeData.spikeCountExtRxD2);
6745  KERNEL_INFO("\t\t\t1ms delay = %d", managerRuntimeData.spikeCountExtRxD1);
6746  KERNEL_INFO("Overall Spike Count:\t2+ms delay = %d", managerRuntimeData.spikeCountD2);
6747  KERNEL_INFO("\t\t\t1ms delay = %d", managerRuntimeData.spikeCountD1);
6748  KERNEL_INFO("\t\t\tTotal = %d", managerRuntimeData.spikeCount);
6749  KERNEL_INFO("*********************************************************************************\n");
6750 }
6751 
6752 //------------------------------ legacy code --------------------------------//
6753 
6754 // We parallelly cleanup the postSynapticIds array to minimize any other wastage in that array by compacting the store
6755 // Appropriate alignment specified by ALIGN_COMPACTION macro is used to ensure some level of alignment (if necessary)
6756 //void SNN::compactConnections() {
6757 // unsigned int* tmp_cumulativePost = new unsigned int[numN];
6758 // unsigned int* tmp_cumulativePre = new unsigned int[numN];
6759 // unsigned int lastCnt_pre = 0;
6760 // unsigned int lastCnt_post = 0;
6761 //
6762 // tmp_cumulativePost[0] = 0;
6763 // tmp_cumulativePre[0] = 0;
6764 //
6765 // for(int i=1; i < numN; i++) {
6766 // lastCnt_post = tmp_cumulativePost[i-1]+managerRuntimeData.Npost[i-1]; //position of last pointer
6767 // lastCnt_pre = tmp_cumulativePre[i-1]+managerRuntimeData.Npre[i-1]; //position of last pointer
6768 // #if COMPACTION_ALIGNMENT_POST
6769 // lastCnt_post= lastCnt_post + COMPACTION_ALIGNMENT_POST-lastCnt_post%COMPACTION_ALIGNMENT_POST;
6770 // lastCnt_pre = lastCnt_pre + COMPACTION_ALIGNMENT_PRE- lastCnt_pre%COMPACTION_ALIGNMENT_PRE;
6771 // #endif
6772 // tmp_cumulativePost[i] = lastCnt_post;
6773 // tmp_cumulativePre[i] = lastCnt_pre;
6774 // assert(tmp_cumulativePost[i] <= managerRuntimeData.cumulativePost[i]);
6775 // assert(tmp_cumulativePre[i] <= managerRuntimeData.cumulativePre[i]);
6776 // }
6777 //
6778 // // compress the post_synaptic array according to the new values of the tmp_cumulative counts....
6779 // unsigned int tmp_numPostSynNet = tmp_cumulativePost[numN-1]+managerRuntimeData.Npost[numN-1];
6780 // unsigned int tmp_numPreSynNet = tmp_cumulativePre[numN-1]+managerRuntimeData.Npre[numN-1];
6781 // assert(tmp_numPostSynNet <= allocatedPost);
6782 // assert(tmp_numPreSynNet <= allocatedPre);
6783 // assert(tmp_numPostSynNet <= numPostSynNet);
6784 // assert(tmp_numPreSynNet <= numPreSynNet);
6785 // KERNEL_DEBUG("******************");
6786 // KERNEL_DEBUG("CompactConnection: ");
6787 // KERNEL_DEBUG("******************");
6788 // KERNEL_DEBUG("old_postCnt = %d, new_postCnt = %d", numPostSynNet, tmp_numPostSynNet);
6789 // KERNEL_DEBUG("old_preCnt = %d, new_postCnt = %d", numPreSynNet, tmp_numPreSynNet);
6790 //
6791 // // new buffer with required size + 100 bytes of additional space just to provide limited overflow
6792 // SynInfo* tmp_postSynapticIds = new SynInfo[tmp_numPostSynNet+100];
6793 //
6794 // // new buffer with required size + 100 bytes of additional space just to provide limited overflow
6795 // SynInfo* tmp_preSynapticIds = new SynInfo[tmp_numPreSynNet+100];
6796 // float* tmp_wt = new float[tmp_numPreSynNet+100];
6797 // float* tmp_maxSynWt = new float[tmp_numPreSynNet+100];
6798 // short int *tmp_cumConnIdPre = new short int[tmp_numPreSynNet+100];
6799 // float *tmp_mulSynFast = new float[numConnections];
6800 // float *tmp_mulSynSlow = new float[numConnections];
6801 //
6802 // // compact synaptic information
6803 // for(int i=0; i<numN; i++) {
6804 // assert(tmp_cumulativePost[i] <= managerRuntimeData.cumulativePost[i]);
6805 // assert(tmp_cumulativePre[i] <= managerRuntimeData.cumulativePre[i]);
6806 // for( int j=0; j<managerRuntimeData.Npost[i]; j++) {
6807 // unsigned int tmpPos = tmp_cumulativePost[i]+j;
6808 // unsigned int oldPos = managerRuntimeData.cumulativePost[i]+j;
6809 // tmp_postSynapticIds[tmpPos] = managerRuntimeData.postSynapticIds[oldPos];
6810 // tmp_SynapticDelay[tmpPos] = tmp_SynapticDelay[oldPos];
6811 // }
6812 // for( int j=0; j<managerRuntimeData.Npre[i]; j++) {
6813 // unsigned int tmpPos = tmp_cumulativePre[i]+j;
6814 // unsigned int oldPos = managerRuntimeData.cumulativePre[i]+j;
6815 // tmp_preSynapticIds[tmpPos] = managerRuntimeData.preSynapticIds[oldPos];
6816 // tmp_maxSynWt[tmpPos] = managerRuntimeData.maxSynWt[oldPos];
6817 // tmp_wt[tmpPos] = managerRuntimeData.wt[oldPos];
6818 // tmp_cumConnIdPre[tmpPos] = managerRuntimeData.connIdsPreIdx[oldPos];
6819 // }
6820 // }
6821 //
6822 // // delete old buffer space
6823 // delete[] managerRuntimeData.postSynapticIds;
6824 // managerRuntimeData.postSynapticIds = tmp_postSynapticIds;
6825 // cpuSnnSz.networkInfoSize -= (sizeof(SynInfo)*numPostSynNet);
6826 // cpuSnnSz.networkInfoSize += (sizeof(SynInfo)*(tmp_numPostSynNet+100));
6827 //
6828 // delete[] managerRuntimeData.cumulativePost;
6829 // managerRuntimeData.cumulativePost = tmp_cumulativePost;
6830 //
6831 // delete[] managerRuntimeData.cumulativePre;
6832 // managerRuntimeData.cumulativePre = tmp_cumulativePre;
6833 //
6834 // delete[] managerRuntimeData.maxSynWt;
6835 // managerRuntimeData.maxSynWt = tmp_maxSynWt;
6836 // cpuSnnSz.synapticInfoSize -= (sizeof(float)*numPreSynNet);
6837 // cpuSnnSz.synapticInfoSize += (sizeof(float)*(tmp_numPreSynNet+100));
6838 //
6839 // delete[] managerRuntimeData.wt;
6840 // managerRuntimeData.wt = tmp_wt;
6841 // cpuSnnSz.synapticInfoSize -= (sizeof(float)*numPreSynNet);
6842 // cpuSnnSz.synapticInfoSize += (sizeof(float)*(tmp_numPreSynNet+100));
6843 //
6844 // delete[] managerRuntimeData.connIdsPreIdx;
6845 // managerRuntimeData.connIdsPreIdx = tmp_cumConnIdPre;
6846 // cpuSnnSz.synapticInfoSize -= (sizeof(short int)*numPreSynNet);
6847 // cpuSnnSz.synapticInfoSize += (sizeof(short int)*(tmp_numPreSynNet+100));
6848 //
6849 // // compact connection-centric information
6850 // for (int i=0; i<numConnections; i++) {
6851 // tmp_mulSynFast[i] = mulSynFast[i];
6852 // tmp_mulSynSlow[i] = mulSynSlow[i];
6853 // }
6854 // delete[] mulSynFast;
6855 // delete[] mulSynSlow;
6856 // mulSynFast = tmp_mulSynFast;
6857 // mulSynSlow = tmp_mulSynSlow;
6858 // cpuSnnSz.networkInfoSize -= (2*sizeof(uint8_t)*numPreSynNet);
6859 // cpuSnnSz.networkInfoSize += (2*sizeof(uint8_t)*(tmp_numPreSynNet+100));
6860 //
6861 //
6862 // delete[] managerRuntimeData.preSynapticIds;
6863 // managerRuntimeData.preSynapticIds = tmp_preSynapticIds;
6864 // cpuSnnSz.synapticInfoSize -= (sizeof(SynInfo)*numPreSynNet);
6865 // cpuSnnSz.synapticInfoSize += (sizeof(SynInfo)*(tmp_numPreSynNet+100));
6866 //
6867 // numPreSynNet = tmp_numPreSynNet;
6868 // numPostSynNet = tmp_numPostSynNet;
6869 //}
6870 
6871 //The post synaptic connections are sorted based on delay here so that we can reduce storage requirement
6872 //and generation of spike at the post-synaptic side.
6873 //We also create the delay_info array has the delay_start and delay_length parameter
6874 //void SNN::reorganizeDelay()
6875 //{
6876 // for(int grpId=0; grpId < numGroups; grpId++) {
6877 // for(int nid=groupConfigs[0][grpId].StartN; nid <= groupConfigs[0][grpId].EndN; nid++) {
6878 // unsigned int jPos=0; // this points to the top of the delay queue
6879 // unsigned int cumN=managerRuntimeData.cumulativePost[nid]; // cumulativePost[] is unsigned int
6880 // unsigned int cumDelayStart=0; // Npost[] is unsigned short
6881 // for(int td = 0; td < maxDelay_; td++) {
6882 // unsigned int j=jPos; // start searching from top of the queue until the end
6883 // unsigned int cnt=0; // store the number of nodes with a delay of td;
6884 // while(j < managerRuntimeData.Npost[nid]) {
6885 // // found a node j with delay=td and we put
6886 // // the delay value = 1 at array location td=0;
6887 // if(td==(tmp_SynapticDelay[cumN+j]-1)) {
6888 // assert(jPos<managerRuntimeData.Npost[nid]);
6889 // swapConnections(nid, j, jPos);
6890 //
6891 // jPos=jPos+1;
6892 // cnt=cnt+1;
6893 // }
6894 // j=j+1;
6895 // }
6896 //
6897 // // update the delay_length and start values...
6898 // managerRuntimeData.postDelayInfo[nid*(maxDelay_+1)+td].delay_length = cnt;
6899 // managerRuntimeData.postDelayInfo[nid*(maxDelay_+1)+td].delay_index_start = cumDelayStart;
6900 // cumDelayStart += cnt;
6901 //
6902 // assert(cumDelayStart <= managerRuntimeData.Npost[nid]);
6903 // }
6904 //
6905 // // total cumulative delay should be equal to number of post-synaptic connections at the end of the loop
6906 // assert(cumDelayStart == managerRuntimeData.Npost[nid]);
6907 // for(unsigned int j=1; j < managerRuntimeData.Npost[nid]; j++) {
6908 // unsigned int cumN=managerRuntimeData.cumulativePost[nid]; // cumulativePost[] is unsigned int
6909 // if( tmp_SynapticDelay[cumN+j] < tmp_SynapticDelay[cumN+j-1]) {
6910 // KERNEL_ERROR("Post-synaptic delays not sorted correctly... id=%d, delay[%d]=%d, delay[%d]=%d",
6911 // nid, j, tmp_SynapticDelay[cumN+j], j-1, tmp_SynapticDelay[cumN+j-1]);
6912 // assert( tmp_SynapticDelay[cumN+j] >= tmp_SynapticDelay[cumN+j-1]);
6913 // }
6914 // }
6915 // }
6916 // }
6917 //}
6918 
6919 //void SNN::swapConnections(int nid, int oldPos, int newPos) {
6920 // unsigned int cumN=managerRuntimeData.cumulativePost[nid];
6921 //
6922 // // Put the node oldPos to the top of the delay queue
6923 // SynInfo tmp = managerRuntimeData.postSynapticIds[cumN+oldPos];
6924 // managerRuntimeData.postSynapticIds[cumN+oldPos]= managerRuntimeData.postSynapticIds[cumN+newPos];
6925 // managerRuntimeData.postSynapticIds[cumN+newPos]= tmp;
6926 //
6927 // // Ensure that you have shifted the delay accordingly....
6928 // uint8_t tmp_delay = tmp_SynapticDelay[cumN+oldPos];
6929 // tmp_SynapticDelay[cumN+oldPos] = tmp_SynapticDelay[cumN+newPos];
6930 // tmp_SynapticDelay[cumN+newPos] = tmp_delay;
6931 //
6932 // // update the pre-information for the postsynaptic neuron at the position oldPos.
6933 // SynInfo postInfo = managerRuntimeData.postSynapticIds[cumN+oldPos];
6934 // int post_nid = GET_CONN_NEURON_ID(postInfo);
6935 // int post_sid = GET_CONN_SYN_ID(postInfo);
6936 //
6937 // SynInfo* preId = &(managerRuntimeData.preSynapticIds[managerRuntimeData.cumulativePre[post_nid]+post_sid]);
6938 // int pre_nid = GET_CONN_NEURON_ID((*preId));
6939 // int pre_sid = GET_CONN_SYN_ID((*preId));
6940 // int pre_gid = GET_CONN_GRP_ID((*preId));
6941 // assert (pre_nid == nid);
6942 // assert (pre_sid == newPos);
6943 // *preId = SET_CONN_ID( pre_nid, oldPos, pre_gid);
6944 //
6945 // // update the pre-information for the postsynaptic neuron at the position newPos
6946 // postInfo = managerRuntimeData.postSynapticIds[cumN+newPos];
6947 // post_nid = GET_CONN_NEURON_ID(postInfo);
6948 // post_sid = GET_CONN_SYN_ID(postInfo);
6949 //
6950 // preId = &(managerRuntimeData.preSynapticIds[managerRuntimeData.cumulativePre[post_nid]+post_sid]);
6951 // pre_nid = GET_CONN_NEURON_ID((*preId));
6952 // pre_sid = GET_CONN_SYN_ID((*preId));
6953 // pre_gid = GET_CONN_GRP_ID((*preId));
6954 // assert (pre_nid == nid);
6955 // assert (pre_sid == oldPos);
6956 // *preId = SET_CONN_ID( pre_nid, newPos, pre_gid);
6957 //}
6958 
6959 // set one specific connection from neuron id 'src' to neuron id 'dest'
6960 //inline void SNN::setConnection(int srcGrp, int destGrp, unsigned int src, unsigned int dest, float synWt,
6961 // float maxWt, uint8_t dVal, int connProp, short int connId) {
6962 // assert(dest<=CONN_SYN_NEURON_MASK); // total number of neurons is less than 1 million within a GPU
6963 // assert((dVal >=1) && (dVal <= maxDelay_));
6964 //
6965 // // adjust sign of weight based on pre-group (negative if pre is inhibitory)
6966 // synWt = isExcitatoryGroup(srcGrp) ? fabs(synWt) : -1.0*fabs(synWt);
6967 // maxWt = isExcitatoryGroup(srcGrp) ? fabs(maxWt) : -1.0*fabs(maxWt);
6968 //
6969 // // we have exceeded the number of possible connection for one neuron
6970 // if(managerRuntimeData.Npost[src] >= groupConfigs[0][srcGrp].numPostSynapses) {
6971 // KERNEL_ERROR("setConnection(%d (Grp=%s), %d (Grp=%s), %f, %d)", src, groupInfo[srcGrp].Name.c_str(),
6972 // dest, groupInfo[destGrp].Name.c_str(), synWt, dVal);
6973 // KERNEL_ERROR("Large number of postsynaptic connections established (%d), max for this group %d.", managerRuntimeData.Npost[src], groupConfigs[0][srcGrp].numPostSynapses);
6974 // exitSimulation(1);
6975 // }
6976 //
6977 // if(managerRuntimeData.Npre[dest] >= groupConfigs[0][destGrp].numPreSynapses) {
6978 // KERNEL_ERROR("setConnection(%d (Grp=%s), %d (Grp=%s), %f, %d)", src, groupInfo[srcGrp].Name.c_str(),
6979 // dest, groupInfo[destGrp].Name.c_str(), synWt, dVal);
6980 // KERNEL_ERROR("Large number of presynaptic connections established (%d), max for this group %d.", managerRuntimeData.Npre[dest], groupConfigs[0][destGrp].numPreSynapses);
6981 // exitSimulation(1);
6982 // }
6983 //
6984 // int p = managerRuntimeData.Npost[src];
6985 //
6986 // assert(managerRuntimeData.Npost[src] >= 0);
6987 // assert(managerRuntimeData.Npre[dest] >= 0);
6988 // assert((src * maxNumPostSynGrp + p) / numN < maxNumPostSynGrp); // divide by numN to prevent INT overflow
6989 //
6990 // unsigned int post_pos = managerRuntimeData.cumulativePost[src] + managerRuntimeData.Npost[src];
6991 // unsigned int pre_pos = managerRuntimeData.cumulativePre[dest] + managerRuntimeData.Npre[dest];
6992 //
6993 // assert(post_pos < numPostSynNet);
6994 // assert(pre_pos < numPreSynNet);
6995 //
6996 // //generate a new postSynapticIds id for the current connection
6997 // managerRuntimeData.postSynapticIds[post_pos] = SET_CONN_ID(dest, managerRuntimeData.Npre[dest], destGrp);
6998 // tmp_SynapticDelay[post_pos] = dVal;
6999 //
7000 // managerRuntimeData.preSynapticIds[pre_pos] = SET_CONN_ID(src, managerRuntimeData.Npost[src], srcGrp);
7001 // managerRuntimeData.wt[pre_pos] = synWt;
7002 // managerRuntimeData.maxSynWt[pre_pos] = maxWt;
7003 // managerRuntimeData.connIdsPreIdx[pre_pos] = connId;
7004 //
7005 // bool synWtType = GET_FIXED_PLASTIC(connProp);
7006 //
7007 // if (synWtType == SYN_PLASTIC) {
7008 // sim_with_fixedwts = false; // if network has any plastic synapses at all, this will be set to true
7009 // managerRuntimeData.Npre_plastic[dest]++;
7010 // // homeostasis
7011 // if (groupConfigs[0][destGrp].WithHomeostasis && groupConfigs[0][destGrp].homeoId ==-1)
7012 // groupConfigs[0][destGrp].homeoId = dest; // this neuron info will be printed
7013 // }
7014 //
7015 // managerRuntimeData.Npre[dest] += 1;
7016 // managerRuntimeData.Npost[src] += 1;
7017 //
7018 // groupInfo[srcGrp].numPostConn++;
7019 // groupInfo[destGrp].numPreConn++;
7020 //
7021 // if (managerRuntimeData.Npost[src] > groupInfo[srcGrp].maxPostConn)
7022 // groupInfo[srcGrp].maxPostConn = managerRuntimeData.Npost[src];
7023 // if (managerRuntimeData.Npre[dest] > groupInfo[destGrp].maxPreConn)
7024 // groupInfo[destGrp].maxPreConn = managerRuntimeData.Npre[src];
7025 //}
SpikeBuffer::step
void step()
advance to next time step
Definition: spike_buffer.cpp:221
GroupMonitorCore::setLastUpdated
void setLastUpdated(unsigned int lastUpdate)
sets timestamp of last GroupMonitor update
Definition: group_monitor_core.h:147
GPU_MODE
@ GPU_MODE
model is run on GPU card(s)
Definition: carlsim_datastructures.h:116
SpikeBuffer::SpikeNode::neurId
int neurId
corresponding global neuron Id
Definition: spike_buffer.h:91
INTERVAL_100MS
@ INTERVAL_100MS
the update interval will be 100 ms, which is 10Hz update frequency
Definition: carlsim_datastructures.h:240
NetworkConfigRT_s::wtChangeDecay
float wtChangeDecay
the wtChange decay
Definition: snn_datastructures.h:677
compareDelay
bool compareDelay(const ConnectionInfo &first, const ConnectionInfo &second)
Definition: snn_manager.cpp:3220
ThreadStruct_s::endIdx
int endIdx
Definition: snn_datastructures.h:728
RuntimeData_s::gNMDA
float * gNMDA
conductance of gNMDA
Definition: snn_datastructures.h:495
SpikeGeneratorCore::nextSpikeTime
virtual int nextSpikeTime(SNN *s, int grpId, int i, int currentTime, int lastScheduledSpikeTime, int endOfTimeSlice)
controls spike generation using a callback mechanism
Definition: callback_core.cpp:62
RuntimeData_s::Izh_d
float * Izh_d
Definition: snn_datastructures.h:476
SNN::getNeuronMonitor
NeuronMonitor * getNeuronMonitor(int grpId)
Returns pointer to existing NeuronMonitor object, NULL else.
Definition: snn_manager.cpp:1981
ConnectionInfo_s::delay
uint8_t delay
Definition: snn_datastructures.h:105
GroupSTDPInfo_s::TAU_MINUS_INV_EXC
float TAU_MINUS_INV_EXC
the inverse of time constant minus, if the exponential or timing-based E-STDP curve is used
Definition: carlsim_datastructures.h:421
GroupConfigRT_s::WithISTDPcurve
STDPCurve WithISTDPcurve
published by GroupConfig
Definition: snn_datastructures.h:397
SNN::getSimTimeSec
int getSimTimeSec()
Definition: snn.h:582
POISSON_NEURON
#define POISSON_NEURON
Definition: carlsim_definitions.h:66
NetworkConfigRT_s::stdpScaleFactor
float stdpScaleFactor
Definition: snn_datastructures.h:676
Grid3D::offsetX
float offsetX
Definition: carlsim_datastructures.h:550
SNN::getGroupNumNeurons
int getGroupNumNeurons(int gGrpId)
Definition: snn.h:559
RuntimeData_s::totalCurrent
float * totalCurrent
Definition: snn_datastructures.h:478
GroupSTDPInfo_s::WithISTDPtype
STDPType WithISTDPtype
the type of I-STDP (STANDARD or DA_MOD)
Definition: carlsim_datastructures.h:417
SNN::setGroupMonitor
GroupMonitor * setGroupMonitor(int grpId, FILE *fid)
sets up a group monitor registered with a callback to process the spikes.
Definition: snn_manager.cpp:1088
SNN::setConductances
void setConductances(bool isSet, int tdAMPA, int trNMDA, int tdNMDA, int tdGABAa, int trGABAb, int tdGABAb)
Sets custom values for conductance decay (\tau_decay) or disables conductances altogether. These will...
Definition: snn_manager.cpp:409
TARGET_GABAb
#define TARGET_GABAb
Definition: carlsim_definitions.h:70
RuntimeData_s::spikeCountExtRxD2
unsigned int spikeCountExtRxD2
the number of external spikes with axonal delay > 1 in a simulation, used in CPU_MODE currently
Definition: snn_datastructures.h:462
ConnectConfig_s::mulSynFast
float mulSynFast
factor to be applied to either gAMPA or gGABAa
Definition: snn_datastructures.h:129
SpikeMonitorCore::getAccumTime
long int getAccumTime()
returns the total accumulated time
Definition: spike_monitor_core.cpp:484
RuntimeData_s::grpNEBuffer
float * grpNEBuffer
Definition: snn_datastructures.h:581
STP_BUF_POS
#define STP_BUF_POS(nid, t, maxDelay)
Definition: snn_definitions.h:103
SynInfo_s
Definition: snn_datastructures.h:90
SimMode
SimMode
simulation mode
Definition: carlsim_datastructures.h:114
NetworkConfigRT_s::maxSpikesD2
unsigned int maxSpikesD2
the estimated maximum number of spikes with delay >= 2 in a network
Definition: snn_datastructures.h:652
integrationMethod_t
integrationMethod_t
Integration methods.
Definition: carlsim_datastructures.h:133
RuntimeData_s::lif_tau_ref
int * lif_tau_ref
Definition: snn_datastructures.h:482
AER
@ AER
mode in which spike information is collected in AER format
Definition: carlsim_datastructures.h:203
TIMING_COUNT
#define TIMING_COUNT
Definition: snn_definitions.h:156
UNKNOWN_LOGGER_ERROR
#define UNKNOWN_LOGGER_ERROR
Definition: error_code.h:94
GroupConfig_s::grid
Grid3D grid
Definition: snn_datastructures.h:321
connection_monitor_core.h
IS_EXCITATORY_TYPE
#define IS_EXCITATORY_TYPE(type)
Definition: carlsim_definitions.h:82
SpikeMonitorCore
Definition: spike_monitor_core.h:71
GroupSTDPInfo_s::TAU_PLUS_INV_EXC
float TAU_PLUS_INV_EXC
the inverse of time constant plus, if the exponential or timing-based E-STDP curve is used
Definition: carlsim_datastructures.h:420
SNN::loadSimulation
void loadSimulation(FILE *fid)
Definition: snn_manager.cpp:1001
GroupNeuromodulatorInfo_s::baseDP
float baseDP
baseline concentration of Dopamine
Definition: carlsim_datastructures.h:445
GroupConfigRT_s::decayDP
float decayDP
decay rate for Dopamine, published by GroupConfig
Definition: snn_datastructures.h:435
DelayInfo_s
Definition: snn_datastructures.h:85
SNN::getNumGroups
int getNumGroups()
Definition: snn.h:569
SNN::setNeuronParametersLIF
void setNeuronParametersLIF(int grpId, int tau_m, int tau_ref, float vTh, float vReset, double minRmem, double maxRmem)
Sets neuron parameters for a group of LIF spiking neurons.
Definition: snn_manager.cpp:575
SET_FIXED_PLASTIC
#define SET_FIXED_PLASTIC(a)
Definition: snn_definitions.h:202
CONN_FULL
@ CONN_FULL
Definition: snn_datastructures.h:75
RuntimeData_s::lif_tau_ref_c
int * lif_tau_ref_c
Definition: snn_datastructures.h:483
SNN::biasWeights
void biasWeights(short int connId, float bias, bool updateWeightRange=false)
Definition: snn_manager.cpp:916
RuntimeData_s::timeTableD2
unsigned int * timeTableD2
firing table, only used in CPU_MODE currently
Definition: snn_datastructures.h:544
RangeWeight
a range struct for synaptic weight magnitudes
Definition: carlsim_datastructures.h:312
NetworkConfigRT_s::dAMPA
double dAMPA
multiplication factor for decay time of AMPA conductance (gAMPA[i] *= dAMPA)
Definition: snn_datastructures.h:682
spike_monitor.h
SpikeBuffer::schedule
void schedule(int neurId, int grpId, unsigned short int delay)
Schedule a spike.
Definition: spike_buffer.cpp:220
SpikeMonitor
Class SpikeMonitor.
Definition: spike_monitor.h:120
SNN::connect
short int connect(int gIDpre, int gIDpost, const std::string &_type, float initWt, float maxWt, float prob, uint8_t minDelay, uint8_t maxDelay, RadiusRF radius, float mulSynFast, float mulSynSlow, bool synWtType)
makes a connection from each neuron in grpId1 to 'numPostSynapses' neurons in grpId2
Definition: snn_manager.cpp:96
NetworkConfigRT_s::rNMDA
double rNMDA
multiplication factor for rise time of NMDA
Definition: snn_datastructures.h:683
GroupConfigRT_s::LAMBDA
float LAMBDA
published by GroupConfig
Definition: snn_datastructures.h:421
NetworkConfigRT_s::sGABAb
double sGABAb
scaling factor for GABAb amplitude
Definition: snn_datastructures.h:689
SNN::isSimulationWithGABAbRise
bool isSimulationWithGABAbRise()
Definition: snn.h:637
SNN::saveSimulation
void saveSimulation(FILE *fid, bool saveSynapseInfo=false)
stores the pre and post synaptic neuron ids with the weight and delay
Definition: snn_manager.cpp:1411
RuntimeData_s::wtChange
float * wtChange
stores the weight change of a synaptic connection
Definition: snn_datastructures.h:523
MAX_CONN_PER_SNN
#define MAX_CONN_PER_SNN
Definition: snn_definitions.h:132
RuntimeData_s::Izh_vr
float * Izh_vr
Definition: snn_datastructures.h:470
Grid3D::offsetZ
float offsetZ
Definition: carlsim_datastructures.h:550
GroupSTDPInfo_s::BETA_LTD
float BETA_LTD
the amplitude of inhibitory LTD if the pulse I-STDP curve is used
Definition: carlsim_datastructures.h:430
MAX_GRP_PER_SNN
#define MAX_GRP_PER_SNN
Definition: snn_definitions.h:133
GroupConfigRT_s::WithESTDP
bool WithESTDP
published by GroupConfig
Definition: snn_datastructures.h:392
GlobalNetworkConfig_s::numN2msDelay
int numN2msDelay
number of neurons with maximum outgoing axonal delay >= 2 ms
Definition: snn_datastructures.h:613
GroupConfigRT_s::WithESTDPtype
STDPType WithESTDPtype
published by GroupConfig
Definition: snn_datastructures.h:394
RuntimeData_s
Definition: snn_datastructures.h:451
NetworkConfigRT_s::dNMDA
double dNMDA
multiplication factor for decay time of NMDA
Definition: snn_datastructures.h:684
SNN::getConductanceGABAa
std::vector< float > getConductanceGABAa(int grpId)
Definition: snn_manager.cpp:1761
ConnectConfig_s::initWt
float initWt
Definition: snn_datastructures.h:126
SNN::getGroupSTDPInfo
GroupSTDPInfo getGroupSTDPInfo(int grpId)
Definition: snn_manager.cpp:1874
RadiusRF::radX
double radX
Definition: carlsim_datastructures.h:373
Grid3D::distX
float distX
Definition: carlsim_datastructures.h:549
GroupSTDPInfo_s::WithSTDP
bool WithSTDP
enable STDP flag
Definition: carlsim_datastructures.h:413
compConnectConfig_s::grpDest
int grpDest
Definition: snn_datastructures.h:181
SNN::setSpikeGenerator
void setSpikeGenerator(int grpId, SpikeGeneratorCore *spikeGenFunc)
sets up a spike generator
Definition: snn_manager.cpp:1174
SNN::isPoint3DinRF
bool isPoint3DinRF(const RadiusRF &radius, const Point3D &pre, const Point3D &post)
Definition: snn_manager.cpp:5062
RuntimeData_s::nextVoltage
float * nextVoltage
membrane potential buffer (next/future time step) for each regular neuron
Definition: snn_datastructures.h:466
GroupSTDPInfo_s::ALPHA_MINUS_EXC
float ALPHA_MINUS_EXC
the amplitude of alpha minus, if the exponential or timing-based E-STDP curve is used
Definition: carlsim_datastructures.h:423
NetworkConfigRT_s::numNSpikeGen
int numNSpikeGen
number of poisson neurons generating spikes based on callback functions
Definition: snn_datastructures.h:639
RuntimeData_s::baseFiring
float * baseFiring
Definition: snn_datastructures.h:566
GroupConfigRT_s::ALPHA_PLUS_INB
float ALPHA_PLUS_INB
published by GroupConfig
Definition: snn_datastructures.h:417
EXP_CURVE
@ EXP_CURVE
standard exponential curve
Definition: carlsim_datastructures.h:179
COMPILED_SNN
@ COMPILED_SNN
Definition: snn_datastructures.h:80
ConnectConfig_s::grpSrc
int grpSrc
Definition: snn_datastructures.h:121
SNN::runNetwork
int runNetwork(int _nsec, int _nmsec, bool printRunSummary)
run the simulation for n sec
Definition: snn_manager.cpp:794
SNN::setWeightAndWeightChangeUpdate
void setWeightAndWeightChangeUpdate(UpdateInterval wtANDwtChangeUpdateInterval, bool enableWtChangeDecay, float wtChangeDecay)
Sets the weight and weight change update parameters.
Definition: snn_manager.cpp:724
CONN_UNKNOWN
@ CONN_UNKNOWN
Definition: snn_datastructures.h:75
NetworkConfigRT_s::dGABAa
double dGABAa
multiplication factor for decay time of GABAa
Definition: snn_datastructures.h:686
GroupSTDPInfo_s::TAU_MINUS_INV_INB
float TAU_MINUS_INV_INB
the inverse of tau minus, if the exponential I-STDP curve is used
Definition: carlsim_datastructures.h:425
RuntimeData_s::stpx
float * stpx
Definition: snn_datastructures.h:512
GroupSTDPInfo_s::ALPHA_PLUS_EXC
float ALPHA_PLUS_EXC
the amplitude of alpha plus, if the exponential or timing-based E-STDP curve is used
Definition: carlsim_datastructures.h:422
SNN::getNeuronMonitorCore
NeuronMonitorCore * getNeuronMonitorCore(int grpId)
Definition: snn_manager.cpp:1992
NetworkConfigRT_s::rGABAb
double rGABAb
multiplication factor for rise time of GABAb
Definition: snn_datastructures.h:687
RuntimeData_s::Izh_vpeak
float * Izh_vpeak
Definition: snn_datastructures.h:472
ConnectionMonitorCore
Definition: connection_monitor_core.h:68
NetworkConfigRT_s::sim_with_GABAb_rise
bool sim_with_GABAb_rise
a flag to inform whether to compute GABAb rise time
Definition: snn_datastructures.h:681
group_monitor_core.h
SpikeGeneratorCore
used for relaying callback to SpikeGenerator
Definition: callback_core.h:70
CHECK_CONNECTION_ID
#define CHECK_CONNECTION_ID(n, total)
< Used in the function getConnectionId
Definition: snn_definitions.h:88
GroupConfigRT_s::TAU_PLUS_INV_EXC
float TAU_PLUS_INV_EXC
published by GroupConfig
Definition: snn_datastructures.h:408
SNN::updateGroupMonitor
void updateGroupMonitor(int grpId=ALL)
access group status (currently the concentration of neuromodulator)
Definition: snn_manager.cpp:6280
SNN::getSpikeMonitorCore
SpikeMonitorCore * getSpikeMonitorCore(int grpId)
Definition: snn_manager.cpp:1970
SNN::isConnectionPlastic
bool isConnectionPlastic(short int connId)
returns whether synapses in connection are fixed (false) or plastic (true)
Definition: snn_manager.cpp:4884
NeuronMonitorCore::getLastUpdated
long int getLastUpdated()
returns timestamp of last NeuronMonitor update
Definition: neuron_monitor_core.h:94
RuntimeData_s::Izh_b
float * Izh_b
Definition: snn_datastructures.h:474
SNN::getRFDist3D
double getRFDist3D(const RadiusRF &radius, const Point3D &pre, const Point3D &post)
checks whether a point pre lies in the receptive field for point post
Definition: snn_manager.cpp:5071
GroupConfigRT_s::numPostSynapses
int numPostSynapses
the total number of post-connections of a group, published by GroupConfigMD
Definition: snn_datastructures.h:386
RuntimeData_s::spikeCount
unsigned int spikeCount
the total number of spikes in a simulation, used in CPU_MODE currently
Definition: snn_datastructures.h:457
ConnectionInfo_s::grpDest
int grpDest
Definition: snn_datastructures.h:97
RuntimeData_s::spikeCountD2Sec
unsigned int spikeCountD2Sec
the total number of spikes with axonal delay > 1 in 1 second, used in CPU_MODE currently
Definition: snn_datastructures.h:454
SNN::setExternalCurrent
void setExternalCurrent(int grpId, const std::vector< float > &current)
injects current (mA) into the soma of every neuron in the group
Definition: snn_manager.cpp:1378
NeuronMonitorCore::pushNeuronState
void pushNeuronState(int neurId, float V, float U, float I)
inserts a (time,neurId) tuple into the D Neuron State vector
Definition: neuron_monitor_core.cpp:118
GroupConfig_s::type
unsigned int type
Definition: snn_datastructures.h:309
SHOWTIME
@ SHOWTIME
Showtime mode, will only output warnings and errors.
Definition: carlsim_datastructures.h:94
SNN::setLogsFp
void setLogsFp(FILE *fpInf=NULL, FILE *fpErr=NULL, FILE *fpDeb=NULL, FILE *fpLog=NULL)
Sets the file pointers for all log files file pointer NULL means don't change it.
Definition: snn_manager.cpp:1672
RuntimeData_s::gGABAa
float * gGABAa
conductance of gGABAa
Definition: snn_datastructures.h:499
NetworkConfigRT_s::sim_with_nm
bool sim_with_nm
Definition: snn_datastructures.h:673
GroupConfigRT_s::GtoLOffset
int GtoLOffset
published by GroupConfigMD
Definition: snn_datastructures.h:383
NetworkConfigRT_s
runtime network configuration
Definition: snn_datastructures.h:626
SET_CONN_PRESENT
#define SET_CONN_PRESENT(a)
Definition: snn_definitions.h:201
GroupConfigRT_s::baseDP
float baseDP
baseline concentration of Dopamine, published by GroupConfig
Definition: snn_datastructures.h:431
RuntimeData_s::current
float * current
Definition: snn_datastructures.h:477
GroupConfigRT_s::withCompartments
bool withCompartments
Definition: snn_datastructures.h:443
RuntimeData_s::Npre
unsigned short * Npre
stores the number of input connections to a neuron
Definition: snn_datastructures.h:515
GroupConfigRT_s::WithISTDPtype
STDPType WithISTDPtype
published by GroupConfig
Definition: snn_datastructures.h:395
RuntimeData_s::grpAChBuffer
float * grpAChBuffer
Definition: snn_datastructures.h:580
GroupSTDPInfo_s
A struct for retrieving STDP related information of a group.
Definition: carlsim_datastructures.h:412
GlobalNetworkConfig_s::simIntegrationMethod
integrationMethod_t simIntegrationMethod
integration method (forward-Euler or Fourth-order Runge-Kutta)
Definition: snn_datastructures.h:615
SNN::getConnectConfig
ConnectConfig getConnectConfig(short int connectId)
required for homeostasis
Definition: snn_manager.cpp:1717
NetworkConfigRT_s::sim_with_stdp
bool sim_with_stdp
Definition: snn_datastructures.h:665
Grid3D::numX
int numX
Definition: carlsim_datastructures.h:548
SpikeMonitorCore::pushAER
void pushAER(int time, int neurId)
inserts a (time,neurId) tuple into the 2D spike vector
Definition: spike_monitor_core.cpp:304
ConnectionGeneratorCore
used for relaying callback to ConnectionGenerator
Definition: callback_core.h:90
NUM_SYNAPSE_BITS
#define NUM_SYNAPSE_BITS
Definition: snn_definitions.h:188
error_code.h
ConnectionInfo_s::connId
short int connId
Definition: snn_datastructures.h:104
SpikeMonitorCore::isRecording
bool isRecording()
returns recording status
Definition: spike_monitor_core.h:152
GroupConfigRT_s::netId
int netId
published by GroupConfigMD
Definition: snn_datastructures.h:375
GroupConfigRT_s::decayACh
float decayACh
decay rate for Acetylcholine, published by GroupConfig
Definition: snn_datastructures.h:437
PoissonRate
Class for generating Poisson spike trains.
Definition: poisson_rate.h:84
GroupConfigRT_s::isSpikeGenerator
bool isSpikeGenerator
published by GroupConfig
Definition: snn_datastructures.h:388
GroupConfigRT_s::numPreSynapses
int numPreSynapses
the total number of pre-connections of a group, published by GroupConfigMD
Definition: snn_datastructures.h:387
GlobalNetworkConfig_s::numSynNet
int numSynNet
number of total synaptic connections in the global network
Definition: snn_datastructures.h:610
RuntimeData_s::spikeGenBits
unsigned int * spikeGenBits
Definition: snn_datastructures.h:588
ConnectConfig_s::connId
short int connId
connectID of the element in the linked list
Definition: snn_datastructures.h:136
SNN::getConductanceNMDA
std::vector< float > getConductanceNMDA(int grpId)
Definition: snn_manager.cpp:1741
SNN::setCompartmentParameters
void setCompartmentParameters(int grpId, float couplingUp, float couplingDown)
Coupling constants for the compartment are set using this method.
Definition: snn_manager.cpp:393
GroupNeuromodulatorInfo_s::decayACh
float decayACh
decay rate for Acetylcholine
Definition: carlsim_datastructures.h:451
RuntimeData_s::lif_vTh
float * lif_vTh
Definition: snn_datastructures.h:484
NetworkConfigRT_s::numConnections
int numConnections
number of local connections in this local network
Definition: snn_datastructures.h:658
RuntimeData_s::recovery
float * recovery
Definition: snn_datastructures.h:467
INTERVAL_1000MS
@ INTERVAL_1000MS
the update interval will be 1000 ms, which is 1Hz update frequency
Definition: carlsim_datastructures.h:241
GPU_RUNTIME_BASE
#define GPU_RUNTIME_BASE
Definition: snn_definitions.h:144
RuntimeData_s::gAMPA
float * gAMPA
conductance of gAMPA
Definition: snn_datastructures.h:498
GroupConfigRT_s::OMEGA
float OMEGA
published by GroupConfig
Definition: snn_datastructures.h:414
NetworkConfigRT_s::dGABAb
double dGABAb
multiplication factor for decay time of GABAb
Definition: snn_datastructures.h:688
NetworkConfigRT_s::maxDelay
int maxDelay
maximum axonal delay in the global network
Definition: snn_datastructures.h:628
SNN::startTesting
void startTesting(bool shallUpdateWeights=true)
enters a testing phase, where all weight updates are disabled
Definition: snn_manager.cpp:6181
NetworkConfigRT_s::sim_with_conductances
bool sim_with_conductances
Definition: snn_datastructures.h:663
ConnectConfig_s::mulSynSlow
float mulSynSlow
factor to be applied to either gNMDA or gGABAb
Definition: snn_datastructures.h:130
ConnectionInfo_s::grpSrc
int grpSrc
Definition: snn_datastructures.h:96
compConnectConfig_s::grpSrc
int grpSrc
Definition: snn_datastructures.h:181
MAX_TIME_SLICE
#define MAX_TIME_SLICE
Definition: snn_definitions.h:152
RuntimeData_s::lif_gain
float * lif_gain
Definition: snn_datastructures.h:486
RuntimeData_s::Izh_vt
float * Izh_vt
Definition: snn_datastructures.h:471
NUM_CPU_CORES
#define NUM_CPU_CORES
Definition: snn_definitions.h:142
KERNEL_WARN
#define KERNEL_WARN(formatc,...)
Definition: snn_definitions.h:112
NeuronMonitorCore::isRecording
bool isRecording()
returns recording status
Definition: neuron_monitor_core.h:73
USER
@ USER
User mode, for experiment-oriented simulations.
Definition: carlsim_datastructures.h:92
SNN::setNeuromodulator
void setNeuromodulator(int grpId, float baseDP, float tauDP, float base5HT, float tau5HT, float baseACh, float tauACh, float baseNE, float tauNE)
Sets baseline concentration and decay time constant of neuromodulators (DP, 5HT, ACh,...
Definition: snn_manager.cpp:597
GroupConfigRT_s::GAMMA
float GAMMA
published by GroupConfig
Definition: snn_datastructures.h:412
RuntimeData_s::Npost
unsigned short * Npost
stores the number of output connections from a neuron.
Definition: snn_datastructures.h:518
GroupConfigRT_s::numN
int numN
published by GroupConfig
Definition: snn_datastructures.h:385
GroupConfig_s::numN
int numN
Definition: snn_datastructures.h:310
SNN::updateConnectionMonitor
void updateConnectionMonitor(short int connId=ALL)
polls connection weights
Definition: snn_manager.cpp:6219
ThreadStruct_s
CPU multithreading subroutine (that takes single argument) struct argument.
Definition: snn_datastructures.h:723
GlobalNetworkConfig_s::maxDelay
int maxDelay
maximum axonal delay in the global network
Definition: snn_datastructures.h:611
GroupConfigRT_s::isSpikeGenFunc
bool isSpikeGenFunc
published by GroupConfig
Definition: snn_datastructures.h:389
GroupConfigRT_s::hasExternalConnect
bool hasExternalConnect
published by GroupConfigMD
Definition: snn_datastructures.h:400
RuntimeData_s::extCurrent
float * extCurrent
Definition: snn_datastructures.h:479
GroupConfigRT_s::decay5HT
float decay5HT
decay rate for Serotonin, published by GroupConfig
Definition: snn_datastructures.h:436
GroupMonitorCore
GroupMonitor private core implementation.
Definition: group_monitor_core.h:64
SNN::setSpikeMonitor
SpikeMonitor * setSpikeMonitor(int gid, FILE *fid)
sets up a spike monitor registered with a callback to process the spikes, there can only be one Spike...
Definition: snn_manager.cpp:1182
CONN_USER_DEFINED
@ CONN_USER_DEFINED
Definition: snn_datastructures.h:75
RuntimeData_s::nUBuffer
float * nUBuffer
Definition: snn_datastructures.h:585
CONN_RANDOM
@ CONN_RANDOM
Definition: snn_datastructures.h:75
ConnectionInfo_s::nSrc
int nSrc
Definition: snn_datastructures.h:98
RuntimeData_s::nPoissonSpikes
unsigned int nPoissonSpikes
the total number of spikes of poisson neurons, used in CPU_MODE currently
Definition: snn_datastructures.h:460
RuntimeData_s::wt
float * wt
stores the weight of a synaptic connection
Definition: snn_datastructures.h:524
RuntimeData_s::nVBuffer
float * nVBuffer
Definition: snn_datastructures.h:584
MAX_NEURON_MON_GRP_SZIE
#define MAX_NEURON_MON_GRP_SZIE
Definition: snn_definitions.h:165
GroupConfigRT_s::isLIF
bool isLIF
True = a LIF spiking group.
Definition: snn_datastructures.h:441
GroupConfigRT_s::Type
unsigned int Type
published by GroupConfig
Definition: snn_datastructures.h:384
NetworkConfigRT_s::sNMDA
double sNMDA
scaling factor for NMDA amplitude
Definition: snn_datastructures.h:685
CPU_RUNTIME_BASE
#define CPU_RUNTIME_BASE
Definition: snn_definitions.h:139
FORWARD_EULER
@ FORWARD_EULER
Definition: carlsim_datastructures.h:134
Grid3D::distY
float distY
Definition: carlsim_datastructures.h:549
group_monitor.h
GroupConfigRT_s::gEndN
int gEndN
published by GroupConfigMD
Definition: snn_datastructures.h:378
SNN::updateNeuronMonitor
void updateNeuronMonitor(int grpId=ALL)
copy required neuron state values from ??? buffer to ??? buffer
Definition: snn_manager.cpp:6590
RuntimeData_s::curSpike
bool * curSpike
Definition: snn_datastructures.h:492
GroupNeuromodulatorInfo_s::base5HT
float base5HT
baseline concentration of Serotonin
Definition: carlsim_datastructures.h:446
GroupConfigRT_s::TAU_MINUS_INV_INB
float TAU_MINUS_INV_INB
published by GroupConfig
Definition: snn_datastructures.h:416
GroupConfigMD_s::gGrpId
int gGrpId
Definition: snn_datastructures.h:337
ConnectionInfo_s::preSynId
int preSynId
Definition: snn_datastructures.h:103
GroupConfigRT_s::gGrpId
int gGrpId
published by GroupConfigMD
Definition: snn_datastructures.h:376
SNN::MINOR_VERSION
static const unsigned int MINOR_VERSION
minor release version, as in CARLsim 2.X
Definition: snn.h:139
HYBRID_MODE
@ HYBRID_MODE
model is run on CPU Core(s), GPU card(s) or both
Definition: carlsim_datastructures.h:117
GroupConfigRT_s::compNeighbors
int compNeighbors[4]
Definition: snn_datastructures.h:446
RuntimeData_s::postDelayInfo
DelayInfo * postDelayInfo
delay information
Definition: snn_datastructures.h:542
RuntimeData_s::timeTableD1
unsigned int * timeTableD1
firing table, only used in CPU_MODE currently
Definition: snn_datastructures.h:543
compConnectConfig_s
The configuration of a compartmental connection.
Definition: snn_datastructures.h:180
ConnectConfig_s::maxDelay
uint8_t maxDelay
Definition: snn_datastructures.h:123
ConnectConfig_s::grpDest
int grpDest
Definition: snn_datastructures.h:122
SNN::createGroup
int createGroup(const std::string &grpName, const Grid3D &grid, int neurType, int preferredPartition, ComputingBackend preferredBackend)
Creates a group of Izhikevich spiking neurons.
Definition: snn_manager.cpp:252
GROUP_ID_MASK
#define GROUP_ID_MASK
Definition: snn_definitions.h:185
GlobalNetworkConfig_s::numNPois
int numNPois
number of poisson neurons in the global network
Definition: snn_datastructures.h:609
SNN::getGroupNeuromodulatorInfo
GroupNeuromodulatorInfo getGroupNeuromodulatorInfo(int grpId)
Definition: snn_manager.cpp:1901
RangeDelay
a range struct for synaptic delays
Definition: carlsim_datastructures.h:279
ConnectionInfo_s::initWt
float initWt
Definition: snn_datastructures.h:101
GroupNeuromodulatorInfo_s::baseACh
float baseACh
baseline concentration of Acetylcholine
Definition: carlsim_datastructures.h:447
PoissonRate::getNumNeurons
int getNumNeurons()
Returns the number of neurons for which to generate Poisson spike trains.
Definition: poisson_rate.cpp:222
SNN::~SNN
~SNN()
SNN Destructor.
Definition: snn_manager.cpp:86
RuntimeData_s::voltage
float * voltage
membrane potential for each regular neuron
Definition: snn_datastructures.h:465
GroupConfig_s::preferredNetId
int preferredNetId
Definition: snn_datastructures.h:308
neuron_monitor_core.h
SNN::setupNetwork
void setupNetwork()
build the network
Definition: snn_manager.cpp:773
GroupSTDPInfo_s::DELTA
float DELTA
the range of inhibitory LTD if the pulse I-STDP curve is used
Definition: carlsim_datastructures.h:432
SNN::stopTesting
void stopTesting()
exits a testing phase, making weight updates possible again
Definition: snn_manager.cpp:6208
GroupConfigRT_s::avgTimeScaleInv
float avgTimeScaleInv
published by GroupConfig
Definition: snn_datastructures.h:427
ThreadStruct_s::netId
int netId
Definition: snn_datastructures.h:725
RuntimeData_s::Npre_plastic
unsigned short * Npre_plastic
stores the number of plastic input connections to a neuron
Definition: snn_datastructures.h:516
RuntimeData_s::spikeCountD1Sec
unsigned int spikeCountD1Sec
the total number of spikes with axonal delay == 1 in 1 second, used in CPU_MODE currently
Definition: snn_datastructures.h:453
PARTITIONED_SNN
@ PARTITIONED_SNN
Definition: snn_datastructures.h:81
GET_CONN_GRP_ID
#define GET_CONN_GRP_ID(val)
Definition: snn_definitions.h:192
SNN::MAJOR_VERSION
static const unsigned int MAJOR_VERSION
major release version, as in CARLsim X
Definition: snn.h:138
GroupConfigRT_s::lGrpId
int lGrpId
published by GroupConfigMD
Definition: snn_datastructures.h:379
INTERVAL_10MS
@ INTERVAL_10MS
the update interval will be 10 ms, which is 100Hz update frequency
Definition: carlsim_datastructures.h:239
GroupConfigRT_s::compCouplingUp
float compCouplingUp
Definition: snn_datastructures.h:444
NEURON_MAX_FIRING_RATE
#define NEURON_MAX_FIRING_RATE
Definition: snn_definitions.h:148
GroupMonitor
Class GroupMonitor.
Definition: group_monitor.h:104
NetworkConfigRT_s::sim_with_stp
bool sim_with_stp
Definition: snn_datastructures.h:668
NetworkConfigRT_s::maxSpikesD1
unsigned int maxSpikesD1
the estimated maximum number of spikes with delay == 1 in a network
Definition: snn_datastructures.h:653
GlobalNetworkConfig_s::numNExcPois
int numNExcPois
number of excitatory poisson neurons in the global network
Definition: snn_datastructures.h:607
RuntimeData_s::Izh_C
float * Izh_C
Definition: snn_datastructures.h:468
LARGE_SPIKE_MON_GRP_SIZE
#define LARGE_SPIKE_MON_GRP_SIZE
Definition: snn_definitions.h:161
GroupConfigRT_s::base5HT
float base5HT
baseline concentration of Serotonin, published by GroupConfig
Definition: snn_datastructures.h:432
GroupNeuromodulatorInfo_s::baseNE
float baseNE
baseline concentration of Noradrenaline
Definition: carlsim_datastructures.h:448
GroupConfigRT_s::compCoupling
float compCoupling[4]
Definition: snn_datastructures.h:447
SNN::setNeuronMonitor
NeuronMonitor * setNeuronMonitor(int gid, FILE *fid)
sets up a neuron monitor registered with a callback to process the neuron state values,...
Definition: snn_manager.cpp:1222
GlobalNetworkConfig_s::numN1msDelay
int numN1msDelay
number of neurons with maximum outgoing axonal delay = 1 ms
Definition: snn_datastructures.h:612
GroupConfigRT_s::baseNE
float baseNE
baseline concentration of Noradrenaline, published by GroupConfig
Definition: snn_datastructures.h:434
RuntimeData_s::Izh_c
float * Izh_c
Definition: snn_datastructures.h:475
SNN::isPoissonGroup
bool isPoissonGroup(int gGrpId)
Definition: snn.h:623
RuntimeData_s::gNMDA_r
float * gNMDA_r
Definition: snn_datastructures.h:496
GroupMonitorCore::getGroupFileId
FILE * getGroupFileId()
returns a pointer to the group data file
Definition: group_monitor_core.h:138
NeuronMonitorCore::setNeuronFileId
void setNeuronFileId(FILE *neuronFileId)
sets pointer to Neuron file
Definition: neuron_monitor_core.cpp:178
ConnectionInfo_s::nDest
int nDest
Definition: snn_datastructures.h:99
GroupMonitorCore::isRecording
bool isRecording()
returns recording status
Definition: group_monitor_core.h:100
MAX_SIMULATION_TIME
#define MAX_SIMULATION_TIME
Definition: snn_definitions.h:153
ALL
#define ALL
CARLsim common definitions.
Definition: carlsim_definitions.h:56
RuntimeData_s::nIBuffer
float * nIBuffer
Definition: snn_datastructures.h:586
connection_monitor.h
SNN::getWeightMatrix2D
std::vector< std::vector< float > > getWeightMatrix2D(short int connId)
Definition: snn_manager.cpp:6233
GroupConfigRT_s::WithHomeostasis
bool WithHomeostasis
published by GroupConfig
Definition: snn_datastructures.h:398
RuntimeData_s::lastSpikeTime
int * lastSpikeTime
stores the last spike time of a neuron
Definition: snn_datastructures.h:520
compConnectConfig_s::connId
short int connId
Definition: snn_datastructures.h:182
NetworkConfigRT_s::simIntegrationMethod
integrationMethod_t simIntegrationMethod
integration method (forward-Euler or Fourth-order Runge-Kutta)
Definition: snn_datastructures.h:691
SNN::getGroupId
int getGroupId(std::string grpName)
Definition: snn_manager.cpp:1853
ComputingBackend
ComputingBackend
computing backend
Definition: carlsim_datastructures.h:148
SpikeBuffer::back
SpikeIterator back()
pointer to the back of the spike buffer
Definition: spike_buffer.cpp:225
NeuronMonitorCore
Definition: neuron_monitor_core.h:59
KERNEL_ERROR
#define KERNEL_ERROR(formatc,...)
Definition: snn_definitions.h:110
ConnectConfig_s::minDelay
uint8_t minDelay
Definition: snn_datastructures.h:124
NetworkConfigRT_s::numPreSynNet
int numPreSynNet
the total number of pre-connections in a network
Definition: snn_datastructures.h:649
GroupConfigRT_s::baseACh
float baseACh
baseline concentration of Acetylcholine, published by GroupConfig
Definition: snn_datastructures.h:433
CONN_GAUSSIAN
@ CONN_GAUSSIAN
Definition: snn_datastructures.h:75
RuntimeData_s::spikeCountExtRxD1
unsigned int spikeCountExtRxD1
the number of external spikes with axonal delay == 1 in a simulation, used in CPU_MODE currently
Definition: snn_datastructures.h:463
spike_buffer.h
SNN::getSpikeMonitor
SpikeMonitor * getSpikeMonitor(int grpId)
Returns pointer to existing SpikeMonitor object, NULL else.
Definition: snn_manager.cpp:1960
GroupConfigRT_s::avgTimeScale_decay
float avgTimeScale_decay
published by GroupConfig
Definition: snn_datastructures.h:426
NetworkConfigRT_s::numPostSynNet
int numPostSynNet
the total number of post-connections in a network
Definition: snn_datastructures.h:648
KERNEL_INFO
#define KERNEL_INFO(formatc,...)
Definition: snn_definitions.h:114
SNN::createSpikeGeneratorGroup
int createSpikeGeneratorGroup(const std::string &grpName, const Grid3D &grid, int neurType, int preferredPartition, ComputingBackend preferredBackend)
Creates a spike generator group (dummy-neurons, not Izhikevich spiking neurons)
Definition: snn_manager.cpp:349
RuntimeData_s::grpDABuffer
float * grpDABuffer
Definition: snn_datastructures.h:578
Grid3D::numY
int numY
Definition: carlsim_datastructures.h:548
DelayInfo_s::delay_index_start
short delay_index_start
Definition: snn_datastructures.h:86
ConnectConfig_s::connProp
uint32_t connProp
Definition: snn_datastructures.h:132
GroupConfigRT_s::WithSTDP
bool WithSTDP
published by GroupConfig
Definition: snn_datastructures.h:391
RuntimeData_s::postSynapticIds
SynInfo * postSynapticIds
10 bit syn id, 22 bit neuron id, ordered based on delay
Definition: snn_datastructures.h:539
SNN::isSimulationWithNMDARise
bool isSimulationWithNMDARise()
Definition: snn.h:636
GroupConfigRT_s::Noffset
int Noffset
the offset of spike generator (poisson) neurons [0, numNPois), published by GroupConfigMD
Definition: snn_datastructures.h:401
RuntimeData_s::spikeCountSec
unsigned int spikeCountSec
the total number of spikes in 1 second, used in CPU_MODE currently
Definition: snn_datastructures.h:452
SNN::getConductanceAMPA
std::vector< float > getConductanceAMPA(int grpId)
Definition: snn_manager.cpp:1728
CPU_MODE
@ CPU_MODE
model is run on CPU core(s)
Definition: carlsim_datastructures.h:115
CPU_MEM
@ CPU_MEM
runtime data is allocated on CPU (main) memory
Definition: snn_datastructures.h:70
SNN::SNN
SNN(const std::string &name, SimMode preferredSimMode, LoggerMode loggerMode, int randSeed)
SNN Constructor.
Definition: snn_manager.cpp:77
ThreadStruct_s::snn_pointer
void * snn_pointer
Definition: snn_datastructures.h:724
SNN::setWeight
void setWeight(short int connId, int neurIdPre, int neurIdPost, float weight, bool updateWeightRange=false)
sets the weight value of a specific synapse
Definition: snn_manager.cpp:1287
MAX_NUM_COMP_CONN
#define MAX_NUM_COMP_CONN
Definition: carlsim_definitions.h:91
KERNEL_DEBUG
#define KERNEL_DEBUG(formatc,...)
Definition: snn_definitions.h:116
RuntimeData_s::connIdsPreIdx
short int * connIdsPreIdx
connectId, per synapse, presynaptic cumulative indexing
Definition: snn_datastructures.h:530
RuntimeData_s::firingTableD1
int * firingTableD1
Definition: snn_datastructures.h:546
ThreadStruct_s::startIdx
int startIdx
Definition: snn_datastructures.h:727
RuntimeData_s::spikeCountD2
unsigned int spikeCountD2
the total number of spikes with axonal delay > 1 in a simulation, used in CPU_MODE currently
Definition: snn_datastructures.h:459
RuntimeData_s::cumulativePost
unsigned int * cumulativePost
Definition: snn_datastructures.h:527
UNKNOWN_STDP
@ UNKNOWN_STDP
Definition: carlsim_datastructures.h:164
SNN::getNeuronLocation3D
Point3D getNeuronLocation3D(int neurId)
Definition: snn_manager.cpp:1916
GroupNeuromodulatorInfo_s::decayNE
float decayNE
decay rate for Noradrenaline
Definition: carlsim_datastructures.h:452
SNN::setIntegrationMethod
void setIntegrationMethod(integrationMethod_t method, int numStepsPerMs)
Sets the integration method and the number of integration steps per 1ms simulation time step.
Definition: snn_manager.cpp:497
SYNAPSE_ID_MASK
#define SYNAPSE_ID_MASK
Definition: snn_definitions.h:186
SNN::scaleWeights
void scaleWeights(short int connId, float scale, bool updateWeightRange=false)
Definition: snn_manager.cpp:1006
RuntimeData_s::spikeCountD1
unsigned int spikeCountD1
the total number of spikes with axonal delay == 1 in a simulation, used in CPU_MODE currently
Definition: snn_datastructures.h:458
RuntimeData_s::maxSynWt
float * maxSynWt
maximum synaptic weight for a connection
Definition: snn_datastructures.h:525
GroupConfigRT_s::numCompNeighbors
short numCompNeighbors
Definition: snn_datastructures.h:448
GroupConfigRT_s::lStartN
int lStartN
published by GroupConfigMD
Definition: snn_datastructures.h:380
neuron_monitor.h
STDPCurve
STDPCurve
STDP curves.
Definition: carlsim_datastructures.h:178
ThreadStruct_s::GtoLOffset
int GtoLOffset
Definition: snn_datastructures.h:729
SpikeMonitorCore::setLastUpdated
void setLastUpdated(long int lastUpdate)
sets timestamp of last SpikeMonitor update
Definition: spike_monitor_core.h:188
RuntimeData_s::Izh_k
float * Izh_k
Definition: snn_datastructures.h:469
Point3D
a point in 3D space
Definition: linear_algebra.h:57
RuntimeData_s::extFiringTableEndIdxD1
int * extFiringTableEndIdxD1
Definition: snn_datastructures.h:552
ConnectConfig_s::connectionMonitorId
int connectionMonitorId
Definition: snn_datastructures.h:131
compareSrcNeuron
bool compareSrcNeuron(const ConnectionInfo &first, const ConnectionInfo &second)
Definition: snn_manager.cpp:3216
SpikeMonitorCore::getSpikeFileId
FILE * getSpikeFileId()
returns a pointer to the spike file
Definition: spike_monitor_core.h:179
SNN::getSimTimeMs
int getSimTimeMs()
Definition: snn.h:583
NO_LOGGER_DIR_ERROR
#define NO_LOGGER_DIR_ERROR
Definition: error_code.h:95
GlobalNetworkConfig_s::numNInhPois
int numNInhPois
number of inhibitory poisson neurons in the global network
Definition: snn_datastructures.h:608
RuntimeData_s::synSpikeTime
int * synSpikeTime
stores the last spike time of a synapse
Definition: snn_datastructures.h:521
SpikeMonitorCore::isBufferBig
bool isBufferBig()
returns true if spike buffer is close to maxAllowedBufferSize
Definition: spike_monitor_core.cpp:468
GroupConfigRT_s::MaxDelay
int8_t MaxDelay
published by GroupConfigMD
Definition: snn_datastructures.h:402
SYN_PLASTIC
#define SYN_PLASTIC
Definition: carlsim_definitions.h:61
Grid3D
A struct to arrange neurons on a 3D grid (a primitive cubic Bravais lattice with cubic side length 1)
Definition: carlsim_datastructures.h:490
CUSTOM
@ CUSTOM
Custom mode, the user can set the location of all the file pointers.
Definition: carlsim_datastructures.h:96
SNN::connectCompartments
short int connectCompartments(int grpIdLower, int grpIdUpper)
Definition: snn_manager.cpp:217
RuntimeData_s::preSynapticIds
SynInfo * preSynapticIds
Definition: snn_datastructures.h:540
SNN::setHomeostasis
void setHomeostasis(int grpId, bool isSet, float homeoScale, float avgTimeScale)
Sets the homeostasis parameters. g is the grpID, enable=true(false) enables(disables) homeostasis,...
Definition: snn_manager.cpp:461
SNN::setSpikeRate
void setSpikeRate(int grpId, PoissonRate *spikeRate, int refPeriod)
Sets the Poisson spike rate for a group. For information on how to set up spikeRate,...
Definition: snn_manager.cpp:1271
Point3D::y
double y
Definition: linear_algebra.h:85
NetworkConfigRT_s::numNAssigned
int numNAssigned
number of total neurons assigned to the local network
Definition: snn_datastructures.h:642
SNN::setESTDP
void setESTDP(int grpId, bool isSet, STDPType type, STDPCurve curve, float alphaPlus, float tauPlus, float alphaMinus, float tauMinus, float gamma)
Set the spike-timing-dependent plasticity (STDP) for a neuron group.
Definition: snn_manager.cpp:621
GlobalNetworkConfig_s::numNExcReg
int numNExcReg
number of regular excitatory neurons in the global network
Definition: snn_datastructures.h:603
GroupConfigRT_s::gStartN
int gStartN
published by GroupConfigMD
Definition: snn_datastructures.h:377
GET_FIXED_PLASTIC
#define GET_FIXED_PLASTIC(a)
Definition: snn_definitions.h:208
NetworkConfigRT_s::sim_with_homeostasis
bool sim_with_homeostasis
Definition: snn_datastructures.h:667
RuntimeData_s::gGABAb
float * gGABAb
conductance of gGABAb
Definition: snn_datastructures.h:500
SpikeMonitorCore::getMode
SpikeMonMode getMode()
returns recording mode
Definition: spike_monitor_core.h:101
loggerMode_string
static const char * loggerMode_string[]
Definition: carlsim_datastructures.h:99
SILENT
@ SILENT
Silent mode, no output is generated.
Definition: carlsim_datastructures.h:95
EXECUTABLE_SNN
@ EXECUTABLE_SNN
Definition: snn_datastructures.h:82
ConnectConfig_s
The configuration of a connection.
Definition: snn_datastructures.h:120
RuntimeData_s::Izh_a
float * Izh_a
Definition: snn_datastructures.h:473
NetworkConfigRT_s::sim_in_testing
bool sim_in_testing
Definition: snn_datastructures.h:669
RuntimeData_s::lif_tau_m
int * lif_tau_m
parameters for a LIF spiking group
Definition: snn_datastructures.h:481
LONG_SPIKE_MON_DURATION
#define LONG_SPIKE_MON_DURATION
Definition: snn_definitions.h:160
GroupConfigRT_s::withParamModel_9
bool withParamModel_9
False = 4 parameter model; 1 = 9 parameter model.
Definition: snn_datastructures.h:440
ConnectionMonitorCore::setConnectFileId
void setConnectFileId(FILE *connFileId)
sets pointer to connection file
Definition: connection_monitor_core.cpp:415
ID_OVERFLOW_ERROR
#define ID_OVERFLOW_ERROR
Definition: error_code.h:97
LoggerMode
LoggerMode
Logger modes.
Definition: carlsim_datastructures.h:91
SpikeBuffer
Circular buffer for delivering spikes.
Definition: spike_buffer.h:66
GroupConfigRT_s::STP_tau_x_inv
float STP_tau_x_inv
published by GroupConfig
Definition: snn_datastructures.h:407
SNN::getNumSynapticConnections
int getNumSynapticConnections(short int connectionId)
gets number of connections associated with a connection ID
Definition: snn_manager.cpp:1949
RuntimeData_s::avgFiring
float * avgFiring
Definition: snn_datastructures.h:567
Grid3D::distZ
float distZ
Definition: carlsim_datastructures.h:549
GroupConfigRT_s::ALPHA_MINUS_INB
float ALPHA_MINUS_INB
published by GroupConfig
Definition: snn_datastructures.h:418
NetworkConfigRT_s::numGroups
int numGroups
number of local groups in this local network
Definition: snn_datastructures.h:656
GroupConfig_s::isSpikeGenerator
bool isSpikeGenerator
Definition: snn_datastructures.h:311
RadiusRF::radY
double radY
Definition: carlsim_datastructures.h:373
RuntimeData_s::grpDA
float * grpDA
Definition: snn_datastructures.h:572
ConnectConfig_s::connProbability
float connProbability
connection probability
Definition: snn_datastructures.h:135
RuntimeData_s::extFiringTableD1
int ** extFiringTableD1
external firing table, only used on GPU
Definition: snn_datastructures.h:549
GlobalNetworkConfig_s::numN
int numN
number of neurons in the global network
Definition: snn_datastructures.h:602
SNN::isSimulationWithCOBA
bool isSimulationWithCOBA()
Definition: snn.h:634
CONFIG_SNN
@ CONFIG_SNN
Definition: snn_datastructures.h:79
NeuronMonitorCore::setLastUpdated
void setLastUpdated(long int lastUpdate)
sets timestamp of last NeuronMonitor update
Definition: neuron_monitor_core.h:97
SNN::getSimTime
int getSimTime()
Definition: snn.h:581
SNN::setNeuronParameters
void setNeuronParameters(int grpId, float izh_a, float izh_a_sd, float izh_b, float izh_b_sd, float izh_c, float izh_c_sd, float izh_d, float izh_d_sd)
Sets the Izhikevich parameters a, b, c, and d of a neuron group.
Definition: snn_manager.cpp:505
NeuronMonitor
Definition: neuron_monitor.h:59
SNN::createGroupLIF
int createGroupLIF(const std::string &grpName, const Grid3D &grid, int neurType, int preferredPartition, ComputingBackend preferredBackend)
Creates a group of LIF spiking neurons.
Definition: snn_manager.cpp:302
SynInfo_s::nId
int nId
neuron id
Definition: snn_datastructures.h:92
GroupConfigRT_s::WithESTDPcurve
STDPCurve WithESTDPcurve
published by GroupConfig
Definition: snn_datastructures.h:396
GroupSTDPInfo_s::WithESTDP
bool WithESTDP
enable E-STDP flag
Definition: carlsim_datastructures.h:414
GroupConfigRT_s::lEndN
int lEndN
published by GroupConfigMD
Definition: snn_datastructures.h:381
SYN_FIXED
#define SYN_FIXED
Definition: carlsim_definitions.h:60
SNN::setHomeoBaseFiringRate
void setHomeoBaseFiringRate(int groupId, float baseFiring, float baseFiringSD)
Sets homeostatic target firing rate (enforced through homeostatic synaptic scaling)
Definition: snn_manager.cpp:481
NetworkConfigRT_s::numNReg
int numNReg
number of regular (spiking) neurons
Definition: snn_datastructures.h:634
SNN::getGroupName
std::string getGroupName(int grpId)
Definition: snn_manager.cpp:1865
GroupSTDPInfo_s::TAU_PLUS_INV_INB
float TAU_PLUS_INV_INB
the inverse of tau plus, if the exponential I-STDP curve is used
Definition: carlsim_datastructures.h:424
GroupMonitorCore::setGroupFileId
void setGroupFileId(FILE *groupFileId)
sets pointer to group data file
Definition: group_monitor_core.cpp:223
ConnectionMonitor
Class ConnectionMonitor.
Definition: connection_monitor.h:149
SNN::isGroupWithHomeostasis
bool isGroupWithHomeostasis(int grpId)
returns whether group has homeostasis enabled (true) or not (false)
Definition: snn_manager.cpp:4894
ConnectConfig_s::type
conType_t type
Definition: snn_datastructures.h:134
RuntimeData_s::extFiringTableEndIdxD2
int * extFiringTableEndIdxD2
Definition: snn_datastructures.h:553
RuntimeData_s::memType
MemType memType
Definition: snn_datastructures.h:506
GroupSTDPInfo_s::WithESTDPcurve
STDPCurve WithESTDPcurve
the E-STDP curve
Definition: carlsim_datastructures.h:418
Grid3D::N
int N
Definition: carlsim_datastructures.h:551
GlobalNetworkConfig_s::numNInhReg
int numNInhReg
number of regular inhibitory neurons in the global network
Definition: snn_datastructures.h:604
RuntimeData_s::grpIds
short int * grpIds
Definition: snn_datastructures.h:531
SNN::exitSimulation
void exitSimulation(int val=1)
deallocates all dynamical structures and exits
Definition: snn_manager.cpp:995
GroupConfigRT_s::KAPPA
float KAPPA
published by GroupConfig
Definition: snn_datastructures.h:413
ConnectConfig_s::maxWt
float maxWt
Definition: snn_datastructures.h:125
SpikeBuffer::SpikeNode::grpId
int grpId
corresponding global group Id
Definition: spike_buffer.h:92
snn.h
GroupConfigRT_s::STP_A
float STP_A
published by GroupConfig
Definition: snn_datastructures.h:404
GroupConfigRT_s::ALPHA_PLUS_EXC
float ALPHA_PLUS_EXC
published by GroupConfig
Definition: snn_datastructures.h:410
TARGET_NMDA
#define TARGET_NMDA
Definition: carlsim_definitions.h:68
SpikeMonitorCore::setSpikeFileId
void setSpikeFileId(FILE *spikeFileId)
sets pointer to spike file
Definition: spike_monitor_core.cpp:360
NetworkConfigRT_s::sim_with_fixedwts
bool sim_with_fixedwts
Definition: snn_datastructures.h:662
GlobalNetworkConfig_s::numNReg
int numNReg
number of regular (spiking) neurons in the global network
Definition: snn_datastructures.h:605
GroupNeuromodulatorInfo_s::decay5HT
float decay5HT
decay rate for Serotonin
Definition: carlsim_datastructures.h:450
RuntimeData_s::stpu
float * stpu
Definition: snn_datastructures.h:513
ConnectionMonitorCore::writeConnectFileSnapshot
void writeConnectFileSnapshot(int simTimeMs, std::vector< std::vector< float > > wts)
writes each snapshot to connect file
Definition: connection_monitor_core.cpp:518
GroupConfigRT_s::LtoGOffset
int LtoGOffset
published by GroupConfigMD
Definition: snn_datastructures.h:382
SNN
Contains all of CARLsim's core functionality.
Definition: snn.h:115
ConnectionInfo_s::srcGLoffset
int srcGLoffset
Definition: snn_datastructures.h:100
SNN::getConductanceGABAb
std::vector< float > getConductanceGABAb(int grpId)
Definition: snn_manager.cpp:1774
SNN::getWeightRange
RangeWeight getWeightRange(short int connId)
returns RangeWeight struct of a connection
Definition: snn_manager.cpp:2003
SynInfo_s::gsId
int gsId
group id and synapse id
Definition: snn_datastructures.h:91
SpikeBuffer::front
SpikeIterator front(int stepOffset=0)
pointer to the front of the spike buffer
Definition: spike_buffer.cpp:224
RuntimeData_s::lif_bias
float * lif_bias
Definition: snn_datastructures.h:487
RuntimeData_s::grpNE
float * grpNE
Definition: snn_datastructures.h:575
ConnectConfig_s::numberOfConnections
int numberOfConnections
Definition: snn_datastructures.h:137
GroupConfig_s
The configuration of a group.
Definition: snn_datastructures.h:302
RuntimeData_s::nSpikeCnt
int * nSpikeCnt
homeostatic plasticity variables
Definition: snn_datastructures.h:562
GroupNeuromodulatorInfo_s
A struct for retrieving neuromodulator information of a group.
Definition: carlsim_datastructures.h:444
RuntimeData_s::extFiringTableD2
int ** extFiringTableD2
external firing table, only used on GPU
Definition: snn_datastructures.h:550
NetworkConfigRT_s::numGroupsAssigned
int numGroupsAssigned
number of groups assigned to this local network
Definition: snn_datastructures.h:657
TARGET_AMPA
#define TARGET_AMPA
Definition: carlsim_definitions.h:67
CONN_FULL_NO_DIRECT
@ CONN_FULL_NO_DIRECT
Definition: snn_datastructures.h:75
ConnectionInfo_s::maxWt
float maxWt
Definition: snn_datastructures.h:102
DelayInfo_s::delay_length
short delay_length
Definition: snn_datastructures.h:87
GroupSTDPInfo_s::WithESTDPtype
STDPType WithESTDPtype
the type of E-STDP (STANDARD or DA_MOD)
Definition: carlsim_datastructures.h:416
RuntimeData_s::gNMDA_d
float * gNMDA_d
Definition: snn_datastructures.h:497
IS_INHIBITORY_TYPE
#define IS_INHIBITORY_TYPE(type)
Definition: carlsim_definitions.h:81
UpdateInterval
UpdateInterval
Update frequency for weights.
Definition: carlsim_datastructures.h:238
SNN::isExcitatoryGroup
bool isExcitatoryGroup(int gGrpId)
Definition: snn.h:621
GroupConfigRT_s::TAU_PLUS_INV_INB
float TAU_PLUS_INV_INB
published by GroupConfig
Definition: snn_datastructures.h:415
GroupMonitorCore::getLastUpdated
int getLastUpdated()
returns timestamp of last GroupMonitor update
Definition: group_monitor_core.h:144
GroupConfigRT_s::FixedInputWts
bool FixedInputWts
published by GroupConfigMD
Definition: snn_datastructures.h:399
NeuronMonitorCore::getNeuronFileId
FILE * getNeuronFileId()
returns a pointer to the neuron state file
Definition: neuron_monitor_core.h:91
SNN::getDelays
uint8_t * getDelays(int gGrpIdPre, int gGrpIdPost, int &numPreN, int &numPostN)
Returns the delay information for all synaptic connections between a pre-synaptic and a post-synaptic...
Definition: snn_manager.cpp:1802
MAX_SPIKE_MON_BUFFER_SIZE
#define MAX_SPIKE_MON_BUFFER_SIZE
Definition: snn_definitions.h:159
GroupConfigRT_s::decayNE
float decayNE
decay rate for Noradrenaline, published by GroupConfig
Definition: snn_datastructures.h:438
GroupConfig_s::isLIF
bool isLIF
Definition: snn_datastructures.h:313
RuntimeData_s::grp5HTBuffer
float * grp5HTBuffer
Definition: snn_datastructures.h:579
NetworkConfigRT_s::sim_with_NMDA_rise
bool sim_with_NMDA_rise
a flag to inform whether to compute NMDA rise time
Definition: snn_datastructures.h:680
ConnectionMonitorCore::init
void init()
Definition: connection_monitor_core.cpp:85
SNN::setISTDP
void setISTDP(int grpId, bool isSet, STDPType type, STDPCurve curve, float ab1, float ab2, float tau1, float tau2)
Set the inhibitory spike-timing-dependent plasticity (STDP) with anti-hebbian curve for a neuron grou...
Definition: snn_manager.cpp:654
SNN::setConnectionMonitor
ConnectionMonitor * setConnectionMonitor(int grpIdPre, int grpIdPost, FILE *fid)
sets up a network monitor registered with a callback to process the spikes.
Definition: snn_manager.cpp:1125
SpikeMonitorCore::getLastUpdated
long int getLastUpdated()
returns timestamp of last SpikeMonitor update
Definition: spike_monitor_core.h:185
GroupSTDPInfo_s::BETA_LTP
float BETA_LTP
the amplitude of inhibitory LTP if the pulse I-STDP curve is used
Definition: carlsim_datastructures.h:429
GroupConfig_s::withCompartments
bool withCompartments
Definition: snn_datastructures.h:314
RoutingTableEntry_s
runtime spike routing table entry
Definition: snn_datastructures.h:702
RuntimeData_s::grp5HT
float * grp5HT
Definition: snn_datastructures.h:573
GroupSTDPInfo_s::ALPHA_MINUS_INB
float ALPHA_MINUS_INB
the amplitude of alpha minus, if the exponential I-STDP curve is used
Definition: carlsim_datastructures.h:427
GlobalNetworkConfig_s::simNumStepsPerMs
int simNumStepsPerMs
number of steps per 1 millisecond
Definition: snn_datastructures.h:616
GroupSTDPInfo_s::LAMBDA
float LAMBDA
the range of inhibitory LTP if the pulse I-STDP curve is used
Definition: carlsim_datastructures.h:431
SNN::getConnectId
short int getConnectId(int grpIdPre, int grpIdPost)
find connection ID based on pre-post group pair, O(N)
Definition: snn_manager.cpp:1704
RuntimeData_s::firingTableD2
int * firingTableD2
Definition: snn_datastructures.h:547
RuntimeData_s::gGABAb_r
float * gGABAb_r
Definition: snn_datastructures.h:501
ConnectionInfo_s
Definition: snn_datastructures.h:95
GroupConfigMD_s
Definition: snn_datastructures.h:329
RuntimeData_s::spikeCountLastSecLeftD2
unsigned int spikeCountLastSecLeftD2
the number of spikes left in the last second, used in CPU_MODE currently
Definition: snn_datastructures.h:461
GroupConfigRT_s::ALPHA_MINUS_EXC
float ALPHA_MINUS_EXC
published by GroupConfig
Definition: snn_datastructures.h:411
GroupConfigRT_s::STP_tau_u_inv
float STP_tau_u_inv
published by GroupConfig
Definition: snn_datastructures.h:406
SpikeBuffer::reset
void reset(int minDelay, int maxDelay)
Reset buffer data.
Definition: spike_buffer.cpp:222
Grid3D::numZ
int numZ
Definition: carlsim_datastructures.h:548
ThreadStruct_s::lGrpId
int lGrpId
Definition: snn_datastructures.h:726
GroupConfigRT_s::compCouplingDown
float compCouplingDown
Definition: snn_datastructures.h:445
NetworkConfigRT_s::simNumStepsPerMs
int simNumStepsPerMs
number of steps per 1 millisecond
Definition: snn_datastructures.h:692
GroupConfigRT_s::TAU_MINUS_INV_EXC
float TAU_MINUS_INV_EXC
published by GroupConfig
Definition: snn_datastructures.h:409
ConnectionMonitorCore::getUpdateTimeIntervalSec
int getUpdateTimeIntervalSec()
Definition: connection_monitor_core.h:131
MAX_NET_PER_SNN
#define MAX_NET_PER_SNN
Definition: snn_definitions.h:134
spike_monitor_core.h
SNN::getGroupGrid3D
Grid3D getGroupGrid3D(int grpId)
Definition: snn_manager.cpp:1846
NetworkConfigRT_s::numN
int numN
number of neurons in the local network
Definition: snn_datastructures.h:631
GET_CONN_NEURON_ID
#define GET_CONN_NEURON_ID(val)
Definition: snn_definitions.h:190
GlobalNetworkConfig_s::timeStep
float timeStep
inverse of simNumStepsPerMs
Definition: snn_datastructures.h:617
GroupConfigRT_s::STP_U
float STP_U
published by GroupConfig
Definition: snn_datastructures.h:405
DEVELOPER
@ DEVELOPER
Developer mode, for developing and debugging code.
Definition: carlsim_datastructures.h:93
RuntimeData_s::grpACh
float * grpACh
Definition: snn_datastructures.h:574
SNN::setSTP
void setSTP(int grpId, bool isSet, float STP_U, float STP_tau_u, float STP_tau_x)
Sets STP params U, tau_u, and tau_x of a neuron group (pre-synaptically) CARLsim implements the short...
Definition: snn_manager.cpp:700
SpikeBuffer::SpikeIterator
Iterator to loop over the scheduled spikes at a certain delay.
Definition: spike_buffer.h:98
GroupSTDPInfo_s::GAMMA
float GAMMA
the turn over point if the timing-based E-STDP curve is used
Definition: carlsim_datastructures.h:428
GlobalNetworkConfig_s::numComp
int numComp
number of compartmental neurons
Definition: snn_datastructures.h:606
GroupConfig_s::grpName
std::string grpName
Definition: snn_datastructures.h:307
GroupConfigRT_s::BETA_LTP
float BETA_LTP
published by GroupConfig
Definition: snn_datastructures.h:419
RadiusRF
A struct to specify the receptive field (RF) radius in 3 dimensions.
Definition: carlsim_datastructures.h:364
MAX_SYN_DELAY
#define MAX_SYN_DELAY
Definition: snn_definitions.h:128
ANY
#define ANY
used for create* method to specify any GPU or a specific GPU
Definition: carlsim_definitions.h:57
TARGET_GABAa
#define TARGET_GABAa
Definition: carlsim_definitions.h:69
GroupConfigRT_s::WithSTP
bool WithSTP
published by GroupConfig
Definition: snn_datastructures.h:390
SNN::getDelayRange
RangeDelay getDelayRange(short int connId)
returns the RangeDelay struct of a connection
Definition: snn_manager.cpp:1795
GET_CONN_SYN_ID
#define GET_CONN_SYN_ID(val)
Definition: snn_definitions.h:191
NetworkConfigRT_s::timeStep
float timeStep
inverse of simNumStepsPerMs
Definition: snn_datastructures.h:693
GroupSTDPInfo_s::WithISTDP
bool WithISTDP
enable I-STDP flag
Definition: carlsim_datastructures.h:415
RadiusRF::radZ
double radZ
Definition: carlsim_datastructures.h:373
GroupConfigRT_s::homeostasisScale
float homeostasisScale
published by GroupConfig
Definition: snn_datastructures.h:428
GroupConfigRT_s::DELTA
float DELTA
published by GroupConfig
Definition: snn_datastructures.h:422
GroupConfigRT_s::avgTimeScale
float avgTimeScale
published by GroupConfig
Definition: snn_datastructures.h:425
GroupConfigRT_s::BETA_LTD
float BETA_LTD
published by GroupConfig
Definition: snn_datastructures.h:420
GroupSTDPInfo_s::WithISTDPcurve
STDPCurve WithISTDPcurve
the I-STDP curve
Definition: carlsim_datastructures.h:419
SNN::updateSpikeMonitor
void updateSpikeMonitor(int grpId=ALL)
copy required spikes from firing buffer to spike buffer
Definition: snn_manager.cpp:6479
RuntimeData_s::lif_vReset
float * lif_vReset
Definition: snn_datastructures.h:485
ConnectConfig_s::connRadius
RadiusRF connRadius
Definition: snn_datastructures.h:128
ConnectConfig_s::conn
ConnectionGeneratorCore * conn
Definition: snn_datastructures.h:133
STDPType
STDPType
STDP flavors.
Definition: carlsim_datastructures.h:161
GroupConfigRT_s::WithISTDP
bool WithISTDP
published by GroupConfig
Definition: snn_datastructures.h:393
RuntimeData_s::cumulativePre
unsigned int * cumulativePre
Definition: snn_datastructures.h:528
SNN::getNumConnections
int getNumConnections()
Definition: snn.h:566
Point3D::x
double x
Definition: linear_algebra.h:85
RuntimeData_s::gGABAb_d
float * gGABAb_d
Definition: snn_datastructures.h:502
RuntimeData_s::allocated
bool allocated
true if all data has been allocated
Definition: snn_datastructures.h:507
Grid3D::offsetY
float offsetY
Definition: carlsim_datastructures.h:550
CONN_ONE_TO_ONE
@ CONN_ONE_TO_ONE
Definition: snn_datastructures.h:75
CPU_CORES
@ CPU_CORES
Definition: carlsim_datastructures.h:149
GroupSTDPInfo_s::ALPHA_PLUS_INB
float ALPHA_PLUS_INB
the amplitude of alpha plus, if the exponential I-STDP curve is used
Definition: carlsim_datastructures.h:426
GroupMonitorCore::pushData
void pushData(int time, float data)
inserts group data (time, value) into the vectors
Definition: group_monitor_core.cpp:111
GroupNeuromodulatorInfo_s::decayDP
float decayDP
decay rate for Dopamine
Definition: carlsim_datastructures.h:449
Point3D::z
double z
Definition: linear_algebra.h:85