CARLsim  4.1.0
CARLsim: a GPU-accelerated SNN simulator
snn_manager.cpp
Go to the documentation of this file.
1 /* * Copyright (c) 2016 Regents of the University of California. All rights reserved.
2 *
3 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions
5 * are met:
6 *
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 *
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * 3. The names of its contributors may not be used to endorse or promote
15 * products derived from this software without specific prior written
16 * permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
22 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
23 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
24 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
25 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
26 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
27 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 *
30 * *********************************************************************************************** *
31 * CARLsim
32 * created by: (MDR) Micah Richert, (JN) Jayram M. Nageswaran
33 * maintained by:
34 * (MA) Mike Avery <averym@uci.edu>
35 * (MB) Michael Beyeler <mbeyeler@uci.edu>,
36 * (KDC) Kristofor Carlson <kdcarlso@uci.edu>
37 * (TSC) Ting-Shuo Chou <tingshuc@uci.edu>
38 * (HK) Hirak J Kashyap <kashyaph@uci.edu>
39 *
40 * CARLsim v1.0: JM, MDR
41 * CARLsim v2.0/v2.1/v2.2: JM, MDR, MA, MB, KDC
42 * CARLsim3: MB, KDC, TSC
43 * CARLsim4: TSC, HK
44 *
45 * CARLsim available from http://socsci.uci.edu/~jkrichma/CARLsim/
46 * Ver 12/31/2016
47 */
48 
49 #include <snn.h>
50 #include <sstream>
51 #include <algorithm>
52 
53 #include <connection_monitor.h>
55 #include <spike_monitor.h>
56 #include <spike_monitor_core.h>
57 #include <group_monitor.h>
58 #include <group_monitor_core.h>
59 #include <neuron_monitor.h>
60 #include <neuron_monitor_core.h>
61 
62 #include <spike_buffer.h>
63 #include <error_code.h>
64 
65 // \FIXME what are the following for? why were they all the way at the bottom of this file?
66 
67 #define COMPACTION_ALIGNMENT_PRE 16
68 #define COMPACTION_ALIGNMENT_POST 0
69 
73 
74 
// TODO: consider moving unsafe computations out of constructor
// Constructor: records the (const) network name, preferred simulation mode,
// logger mode, and random seed, then delegates all remaining setup to SNNinit().
SNN::SNN(const std::string& name, SimMode preferredSimMode, LoggerMode loggerMode, int randSeed)
	: networkName_(name), preferredSimMode_(preferredSimMode), loggerMode_(loggerMode),
	randSeed_(SNN::setRandSeed(randSeed)) // all of these are const, so they must be set in the init list
{
	// move all unsafe operations out of constructor
	SNNinit();
}
83 
84 // destructor
86  if (!simulatorDeleted)
87  deleteObjects();
88 }
89 
93 
94 // make from each neuron in grpId1 to 'numPostSynapses' neurons in grpId2
95 short int SNN::connect(int grpId1, int grpId2, const std::string& _type, float initWt, float maxWt, float prob,
96  uint8_t minDelay, uint8_t maxDelay, RadiusRF radius,
97  float _mulSynFast, float _mulSynSlow, bool synWtType) {
98  //const std::string& wtType
99  int retId=-1;
100  assert(grpId1 < numGroups);
101  assert(grpId2 < numGroups);
102  assert(minDelay <= maxDelay);
103  assert(!isPoissonGroup(grpId2));
104 
105  //* \deprecated Do these ramp thingies still work?
106 // bool useRandWts = (wtType.find("random") != std::string::npos);
107 // bool useRampDownWts = (wtType.find("ramp-down") != std::string::npos);
108 // bool useRampUpWts = (wtType.find("ramp-up") != std::string::npos);
109 // uint32_t connProp = SET_INITWTS_RANDOM(useRandWts)
110 // | SET_CONN_PRESENT(1)
111 // | SET_FIXED_PLASTIC(synWtType)
112 // | SET_INITWTS_RAMPUP(useRampUpWts)
113 // | SET_INITWTS_RAMPDOWN(useRampDownWts);
114  uint32_t connProp = SET_CONN_PRESENT(1) | SET_FIXED_PLASTIC(synWtType);
115 
116  Grid3D szPre = getGroupGrid3D(grpId1);
117  Grid3D szPost = getGroupGrid3D(grpId2);
118 
119  // initialize configuration of a connection
120  ConnectConfig connConfig;
121 
122  connConfig.grpSrc = grpId1;
123  connConfig.grpDest = grpId2;
124  connConfig.initWt = initWt;
125  connConfig.maxWt = maxWt;
126  connConfig.maxDelay = maxDelay;
127  connConfig.minDelay = minDelay;
128 // newInfo->radX = (radX<0) ? MAX(szPre.x,szPost.x) : radX; // <0 means full connectivity, so the
129 // newInfo->radY = (radY<0) ? MAX(szPre.y,szPost.y) : radY; // effective group size is Grid3D.x. Grab
130 // newInfo->radZ = (radZ<0) ? MAX(szPre.z,szPost.z) : radZ; // the larger of pre / post to connect all
131  connConfig.connRadius = radius;
132  connConfig.mulSynFast = _mulSynFast;
133  connConfig.mulSynSlow = _mulSynSlow;
134  connConfig.connProp = connProp;
135  connConfig.connProbability = prob;
136  connConfig.type = CONN_UNKNOWN;
137  connConfig.connectionMonitorId = -1;
138  connConfig.connId = -1;
139  connConfig.conn = NULL;
140  connConfig.numberOfConnections = 0;
141 
142  if ( _type.find("random") != std::string::npos) {
143  connConfig.type = CONN_RANDOM;
144  }
145  //so you're setting the size to be prob*Number of synapses in group info + some standard deviation ...
146  else if ( _type.find("full-no-direct") != std::string::npos) {
147  connConfig.type = CONN_FULL_NO_DIRECT;
148  }
149  else if ( _type.find("full") != std::string::npos) {
150  connConfig.type = CONN_FULL;
151  }
152  else if ( _type.find("one-to-one") != std::string::npos) {
153  connConfig.type = CONN_ONE_TO_ONE;
154  } else if ( _type.find("gaussian") != std::string::npos) {
155  connConfig.type = CONN_GAUSSIAN;
156  } else {
157  KERNEL_ERROR("Invalid connection type (should be 'random', 'full', 'one-to-one', 'full-no-direct', or 'gaussian')");
158  exitSimulation(-1);
159  }
160 
161  // assign connection id
162  assert(connConfig.connId == -1);
163  connConfig.connId = numConnections;
164 
165  KERNEL_DEBUG("CONNECT SETUP: connId=%d, mulFast=%f, mulSlow=%f", connConfig.connId, connConfig.mulSynFast, connConfig.mulSynSlow);
166 
167  // store the configuration of a connection
168  connectConfigMap[numConnections] = connConfig; // connConfig.connId == numConnections
169 
170  assert(numConnections < MAX_CONN_PER_SNN); // make sure we don't overflow connId
171  numConnections++;
172 
173  return (numConnections - 1);
174 }
175 
176 // make custom connections from grpId1 to grpId2
177 short int SNN::connect(int grpId1, int grpId2, ConnectionGeneratorCore* conn, float _mulSynFast, float _mulSynSlow,
178  bool synWtType) {
179  int retId=-1;
180 
181  assert(grpId1 < numGroups);
182  assert(grpId2 < numGroups);
183 
184  // initialize the configuration of a connection
185  ConnectConfig connConfig;
186 
187  connConfig.grpSrc = grpId1;
188  connConfig.grpDest = grpId2;
189  connConfig.initWt = 0.0f;
190  connConfig.maxWt = 0.0f;
191  connConfig.maxDelay = MAX_SYN_DELAY;
192  connConfig.minDelay = 1;
193  connConfig.mulSynFast = _mulSynFast;
194  connConfig.mulSynSlow = _mulSynSlow;
195  connConfig.connProp = SET_CONN_PRESENT(1) | SET_FIXED_PLASTIC(synWtType);
196  connConfig.type = CONN_USER_DEFINED;
197  connConfig.conn = conn;
198  connConfig.connectionMonitorId = -1;
199  connConfig.connId = -1;
200  connConfig.numberOfConnections = 0;
201 
202  // assign a connection id
203  assert(connConfig.connId == -1);
204  connConfig.connId = numConnections;
205 
206  // store the configuration of a connection
207  connectConfigMap[numConnections] = connConfig; // connConfig.connId == numConnections
208 
209  assert(numConnections < MAX_CONN_PER_SNN); // make sure we don't overflow connId
210  numConnections++;
211 
212  return (numConnections - 1);
213 }
214 
215 // make a compartmental connection between two groups
216 short int SNN::connectCompartments(int grpIdLower, int grpIdUpper) {
217  assert(grpIdLower >= 0 && grpIdLower < numGroups);
218  assert(grpIdUpper >= 0 && grpIdUpper < numGroups);
219  assert(grpIdLower != grpIdUpper);
220  assert(!isPoissonGroup(grpIdLower));
221  assert(!isPoissonGroup(grpIdUpper));
222 
223  // the two groups must be located on the same partition
224  assert(groupConfigMap[grpIdLower].preferredNetId == groupConfigMap[grpIdUpper].preferredNetId);
225 
226  // this flag must be set if any compartmental connections exist
227  // note that grpId.withCompartments is not necessarily set just yet, this will be done in
228  // CpuSNN::setCompartmentParameters
229  sim_with_compartments = true;
230 
231  compConnectConfig compConnConfig;
232 
233  compConnConfig.grpSrc = grpIdLower;
234  compConnConfig.grpDest = grpIdUpper;
235  compConnConfig.connId = -1;
236 
237  // assign a connection id
238  assert(compConnConfig.connId == -1);
239  compConnConfig.connId = numCompartmentConnections;
240 
241  // store the configuration of a connection
242  compConnectConfigMap[numCompartmentConnections] = compConnConfig;
243 
244  numCompartmentConnections++;
245 
246  return (numCompartmentConnections - 1);
247 }
248 
249 // create group of Izhikevich neurons
250 // use int for nNeur to avoid arithmetic underflow
251 int SNN::createGroup(const std::string& grpName, const Grid3D& grid, int neurType, int preferredPartition, ComputingBackend preferredBackend) {
252  assert(grid.numX * grid.numY * grid.numZ > 0);
253  assert(neurType >= 0);
254  assert(numGroups < MAX_GRP_PER_SNN);
255 
256  if ( (!(neurType & TARGET_AMPA) && !(neurType & TARGET_NMDA) &&
257  !(neurType & TARGET_GABAa) && !(neurType & TARGET_GABAb)) || (neurType & POISSON_NEURON)) {
258  KERNEL_ERROR("Invalid type using createGroup... Cannot create poisson generators here.");
259  exitSimulation(1);
260  }
261 
262  // initialize group configuration
263  GroupConfig grpConfig;
264  GroupConfigMD grpConfigMD;
265 
266  //All groups are non-compartmental by default
267  grpConfig.withCompartments = false;
268 
269  // init parameters of neural group size and location
270  grpConfig.grpName = grpName;
271  grpConfig.type = neurType;
272  grpConfig.numN = grid.N;
273 
274  grpConfig.isSpikeGenerator = false;
275  grpConfig.grid = grid;
276  grpConfig.isLIF = false;
277 
278  if (preferredPartition == ANY) {
279  grpConfig.preferredNetId = ANY;
280  } else if (preferredBackend == CPU_CORES) {
281  grpConfig.preferredNetId = preferredPartition + CPU_RUNTIME_BASE;
282  } else {
283  grpConfig.preferredNetId = preferredPartition + GPU_RUNTIME_BASE;
284  }
285 
286  // assign a global group id
287  grpConfigMD.gGrpId = numGroups;
288 
289  // store the configuration of a group
290  groupConfigMap[numGroups] = grpConfig; // numGroups == grpId
291  groupConfigMDMap[numGroups] = grpConfigMD;
292 
293  assert(numGroups < MAX_GRP_PER_SNN); // make sure we don't overflow connId
294  numGroups++;
295 
296  return grpConfigMD.gGrpId;
297 }
298 
299 // create group of LIF neurons
300 // use int for nNeur to avoid arithmetic underflow
301 int SNN::createGroupLIF(const std::string& grpName, const Grid3D& grid, int neurType, int preferredPartition, ComputingBackend preferredBackend) {
302  assert(grid.numX * grid.numY * grid.numZ > 0);
303  assert(neurType >= 0);
304  assert(numGroups < MAX_GRP_PER_SNN);
305 
306  if ( (!(neurType & TARGET_AMPA) && !(neurType & TARGET_NMDA) &&
307  !(neurType & TARGET_GABAa) && !(neurType & TARGET_GABAb)) || (neurType & POISSON_NEURON)) {
308  KERNEL_ERROR("Invalid type using createGroup... Cannot create poisson generators here.");
309  exitSimulation(1);
310  }
311 
312  // initialize group configuration
313  GroupConfig grpConfig;
314  GroupConfigMD grpConfigMD;
315 
316  // init parameters of neural group size and location
317  grpConfig.grpName = grpName;
318  grpConfig.type = neurType;
319  grpConfig.numN = grid.N;
320 
321  grpConfig.isLIF = true;
322  grpConfig.isSpikeGenerator = false;
323  grpConfig.grid = grid;
324 
325  if (preferredPartition == ANY) {
326  grpConfig.preferredNetId = ANY;
327  } else if (preferredBackend == CPU_CORES) {
328  grpConfig.preferredNetId = preferredPartition + CPU_RUNTIME_BASE;
329  } else {
330  grpConfig.preferredNetId = preferredPartition + GPU_RUNTIME_BASE;
331  }
332 
333  // assign a global group id
334  grpConfigMD.gGrpId = numGroups;
335 
336  // store the configuration of a group
337  groupConfigMap[numGroups] = grpConfig; // numGroups == grpId
338  groupConfigMDMap[numGroups] = grpConfigMD;
339 
340  assert(numGroups < MAX_GRP_PER_SNN); // make sure we don't overflow connId
341  numGroups++;
342 
343  return grpConfigMD.gGrpId;
344 }
345 
346 // create spike generator group
347 // use int for nNeur to avoid arithmetic underflow
348 int SNN::createSpikeGeneratorGroup(const std::string& grpName, const Grid3D& grid, int neurType, int preferredPartition, ComputingBackend preferredBackend) {
349  assert(grid.numX * grid.numY * grid.numZ > 0);
350  assert(neurType >= 0);
351  assert(numGroups < MAX_GRP_PER_SNN);
352 
353  // initialize group configuration
354  GroupConfig grpConfig;
355  GroupConfigMD grpConfigMD;
356 
357  //All groups are non-compartmental by default FIXME:IS THIS NECESSARY?
358  grpConfig.withCompartments = false;
359 
360  // init parameters of neural group size and location
361  grpConfig.grpName = grpName;
362  grpConfig.type = neurType | POISSON_NEURON;
363  grpConfig.numN = grid.N;
364  grpConfig.isSpikeGenerator = true;
365  grpConfig.grid = grid;
366  grpConfig.isLIF = false;
367 
368  if (preferredPartition == ANY) {
369  grpConfig.preferredNetId = ANY;
370  }
371  else if (preferredBackend == CPU_CORES) {
372  grpConfig.preferredNetId = preferredPartition + CPU_RUNTIME_BASE;
373  }
374  else {
375  grpConfig.preferredNetId = preferredPartition + GPU_RUNTIME_BASE;
376  }
377 
378  // assign a global group id
379  grpConfigMD.gGrpId = numGroups;
380 
381  // store the configuration of a group
382  groupConfigMap[numGroups] = grpConfig;
383  groupConfigMDMap[numGroups] = grpConfigMD;
384 
385  assert(numGroups < MAX_GRP_PER_SNN); // make sure we don't overflow connId
386  numGroups++;
387  numSpikeGenGrps++;
388 
389  return grpConfigMD.gGrpId;
390 }
391 
392 void SNN::setCompartmentParameters(int gGrpId, float couplingUp, float couplingDown) {
393  if (gGrpId == ALL) {
394  for (int grpId = 0; grpId<numGroups; grpId++) {
395  setCompartmentParameters(grpId, couplingUp, couplingDown);
396  }
397  }
398  else {
399  groupConfigMap[gGrpId].withCompartments = true;
400  groupConfigMap[gGrpId].compCouplingUp = couplingUp;
401  groupConfigMap[gGrpId].compCouplingDown = couplingDown;
402  glbNetworkConfig.numComp += groupConfigMap[gGrpId].numN;
403  }
404 }
405 
406 
407 // set conductance values for a simulation (custom values or disable conductances alltogether)
408 void SNN::setConductances(bool isSet, int tdAMPA, int trNMDA, int tdNMDA, int tdGABAa, int trGABAb, int tdGABAb) {
409  if (isSet) {
410  assert(tdAMPA>0); assert(tdNMDA>0); assert(tdGABAa>0); assert(tdGABAb>0);
411  assert(trNMDA>=0); assert(trGABAb>=0); // 0 to disable rise times
412  assert(trNMDA!=tdNMDA); assert(trGABAb!=tdGABAb); // singularity
413  }
414 
415  // set conductances globally for all connections
416  sim_with_conductances |= isSet;
417  dAMPA = 1.0-1.0/tdAMPA;
418  dNMDA = 1.0-1.0/tdNMDA;
419  dGABAa = 1.0-1.0/tdGABAa;
420  dGABAb = 1.0-1.0/tdGABAb;
421 
422  if (trNMDA>0) {
423  // use rise time for NMDA
424  sim_with_NMDA_rise = true;
425  rNMDA = 1.0-1.0/trNMDA;
426 
427  // compute max conductance under this model to scale it back to 1
428  // otherwise the peak conductance will not be equal to the weight
429  double tmax = (-tdNMDA*trNMDA*log(1.0*trNMDA/tdNMDA))/(tdNMDA-trNMDA); // t at which cond will be max
430  sNMDA = 1.0/(exp(-tmax/tdNMDA)-exp(-tmax/trNMDA)); // scaling factor, 1 over max amplitude
431  assert(!isinf(tmax) && !isnan(tmax) && tmax>=0);
432  assert(!isinf(sNMDA) && !isnan(sNMDA) && sNMDA>0);
433  }
434 
435  if (trGABAb>0) {
436  // use rise time for GABAb
437  sim_with_GABAb_rise = true;
438  rGABAb = 1.0-1.0/trGABAb;
439 
440  // compute max conductance under this model to scale it back to 1
441  // otherwise the peak conductance will not be equal to the weight
442  double tmax = (-tdGABAb*trGABAb*log(1.0*trGABAb/tdGABAb))/(tdGABAb-trGABAb); // t at which cond will be max
443  sGABAb = 1.0/(exp(-tmax/tdGABAb)-exp(-tmax/trGABAb)); // scaling factor, 1 over max amplitude
444  assert(!isinf(tmax) && !isnan(tmax)); assert(!isinf(sGABAb) && !isnan(sGABAb) && sGABAb>0);
445  }
446 
447  if (sim_with_conductances) {
448  KERNEL_INFO("Running COBA mode:");
449  KERNEL_INFO(" - AMPA decay time = %5d ms", tdAMPA);
450  KERNEL_INFO(" - NMDA rise time %s = %5d ms", sim_with_NMDA_rise?" ":"(disabled)", trNMDA);
451  KERNEL_INFO(" - GABAa decay time = %5d ms", tdGABAa);
452  KERNEL_INFO(" - GABAb rise time %s = %5d ms", sim_with_GABAb_rise?" ":"(disabled)",trGABAb);
453  KERNEL_INFO(" - GABAb decay time = %5d ms", tdGABAb);
454  } else {
455  KERNEL_INFO("Running CUBA mode (all synaptic conductances disabled)");
456  }
457 }
458 
459 // set homeostasis for group
460 void SNN::setHomeostasis(int gGrpId, bool isSet, float homeoScale, float avgTimeScale) {
461  if (gGrpId == ALL) { // shortcut for all groups
462  for(int grpId = 0; grpId < numGroups; grpId++) {
463  setHomeostasis(grpId, isSet, homeoScale, avgTimeScale);
464  }
465  } else {
466  // set conductances for a given group
467  sim_with_homeostasis |= isSet;
468  groupConfigMap[gGrpId].homeoConfig.WithHomeostasis = isSet;
469  groupConfigMap[gGrpId].homeoConfig.homeostasisScale = homeoScale;
470  groupConfigMap[gGrpId].homeoConfig.avgTimeScale = avgTimeScale;
471  groupConfigMap[gGrpId].homeoConfig.avgTimeScaleInv = 1.0f / avgTimeScale;
472  groupConfigMap[gGrpId].homeoConfig.avgTimeScaleDecay = (avgTimeScale * 1000.0f - 1.0f) / (avgTimeScale * 1000.0f);
473 
474  KERNEL_INFO("Homeostasis parameters %s for %d (%s):\thomeoScale: %f, avgTimeScale: %f",
475  isSet?"enabled":"disabled", gGrpId, groupConfigMap[gGrpId].grpName.c_str(), homeoScale, avgTimeScale);
476  }
477 }
478 
479 // set a homeostatic target firing rate (enforced through homeostatic synaptic scaling)
480 void SNN::setHomeoBaseFiringRate(int gGrpId, float baseFiring, float baseFiringSD) {
481  if (gGrpId == ALL) { // shortcut for all groups
482  for(int grpId = 0; grpId < numGroups; grpId++) {
483  setHomeoBaseFiringRate(grpId, baseFiring, baseFiringSD);
484  }
485  } else {
486  // set homeostatsis for a given group
487  groupConfigMap[gGrpId].homeoConfig.baseFiring = baseFiring;
488  groupConfigMap[gGrpId].homeoConfig.baseFiringSD = baseFiringSD;
489 
490  KERNEL_INFO("Homeostatic base firing rate set for %d (%s):\tbaseFiring: %3.3f, baseFiringStd: %3.3f",
491  gGrpId, groupConfigMap[gGrpId].grpName.c_str(), baseFiring, baseFiringSD);
492  }
493 }
494 
495 
// Select the numerical integration method and the number of integration steps
// performed per 1-ms simulation tick.
void SNN::setIntegrationMethod(integrationMethod_t method, int numStepsPerMs) {
	// between 1 and 100 sub-steps per millisecond
	assert(numStepsPerMs >= 1 && numStepsPerMs <= 100);
	glbNetworkConfig.simIntegrationMethod = method;
	glbNetworkConfig.simNumStepsPerMs = numStepsPerMs;
	glbNetworkConfig.timeStep = 1.0f / numStepsPerMs; // step size in ms
}
502 
503 // set Izhikevich parameters for group
504 void SNN::setNeuronParameters(int gGrpId, float izh_a, float izh_a_sd, float izh_b, float izh_b_sd,
505  float izh_c, float izh_c_sd, float izh_d, float izh_d_sd)
506 {
507  assert(gGrpId >= -1);
508  assert(izh_a_sd >= 0); assert(izh_b_sd >= 0); assert(izh_c_sd >= 0); assert(izh_d_sd >= 0);
509 
510  if (gGrpId == ALL) { // shortcut for all groups
511  for(int grpId = 0; grpId < numGroups; grpId++) {
512  setNeuronParameters(grpId, izh_a, izh_a_sd, izh_b, izh_b_sd, izh_c, izh_c_sd, izh_d, izh_d_sd);
513  }
514  } else {
515  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a = izh_a;
516  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a_sd = izh_a_sd;
517  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b = izh_b;
518  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b_sd = izh_b_sd;
519  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c = izh_c;
520  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c_sd = izh_c_sd;
521  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d = izh_d;
522  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d_sd = izh_d_sd;
523  groupConfigMap[gGrpId].withParamModel_9 = 0;
524  groupConfigMap[gGrpId].isLIF = 0;
525  }
526 }
527 
528 // set (9) Izhikevich parameters for group
529 void SNN::setNeuronParameters(int gGrpId, float izh_C, float izh_C_sd, float izh_k, float izh_k_sd,
530  float izh_vr, float izh_vr_sd, float izh_vt, float izh_vt_sd,
531  float izh_a, float izh_a_sd, float izh_b, float izh_b_sd,
532  float izh_vpeak, float izh_vpeak_sd, float izh_c, float izh_c_sd,
533  float izh_d, float izh_d_sd)
534 {
535  assert(gGrpId >= -1);
536  assert(izh_C_sd >= 0); assert(izh_k_sd >= 0); assert(izh_vr_sd >= 0);
537  assert(izh_vt_sd >= 0); assert(izh_a_sd >= 0); assert(izh_b_sd >= 0); assert(izh_vpeak_sd >= 0);
538  assert(izh_c_sd >= 0); assert(izh_d_sd >= 0);
539 
540  if (gGrpId == ALL) { // shortcut for all groups
541  for (int grpId = 0; grpId<numGroups; grpId++) {
542  setNeuronParameters(grpId, izh_C, izh_C_sd, izh_k, izh_k_sd, izh_vr, izh_vr_sd, izh_vt, izh_vt_sd,
543  izh_a, izh_a_sd, izh_b, izh_b_sd, izh_vpeak, izh_vpeak_sd, izh_c, izh_c_sd,
544  izh_d, izh_d_sd);
545  }
546  }
547  else {
548  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a = izh_a;
549  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a_sd = izh_a_sd;
550  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b = izh_b;
551  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b_sd = izh_b_sd;
552  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c = izh_c;
553  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c_sd = izh_c_sd;
554  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d = izh_d;
555  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d_sd = izh_d_sd;
556  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_C = izh_C;
557  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_C_sd = izh_C_sd;
558  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_k = izh_k;
559  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_k_sd = izh_k_sd;
560  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vr = izh_vr;
561  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vr_sd = izh_vr_sd;
562  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vt = izh_vt;
563  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vt_sd = izh_vt_sd;
564  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vpeak = izh_vpeak;
565  groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vpeak_sd = izh_vpeak_sd;
566  groupConfigMap[gGrpId].withParamModel_9 = 1;
567  groupConfigMap[gGrpId].isLIF = 0;
568  KERNEL_INFO("Set a nine parameter group!");
569  }
570 }
571 
572 
573 // set LIF parameters for the group
574 void SNN::setNeuronParametersLIF(int gGrpId, int tau_m, int tau_ref, float vTh, float vReset, double minRmem, double maxRmem)
575 {
576  assert(gGrpId >= -1);
577  assert(tau_m >= 0); assert(tau_ref >= 0); assert(vReset < vTh);
578  assert(minRmem >= 0.0f); assert(minRmem <= maxRmem);
579 
580  if (gGrpId == ALL) { // shortcut for all groups
581  for(int grpId = 0; grpId < numGroups; grpId++) {
582  setNeuronParametersLIF(grpId, tau_m, tau_ref, vTh, vReset, minRmem, maxRmem);
583  }
584  } else {
585  groupConfigMap[gGrpId].neuralDynamicsConfig.lif_tau_m = tau_m;
586  groupConfigMap[gGrpId].neuralDynamicsConfig.lif_tau_ref = tau_ref;
587  groupConfigMap[gGrpId].neuralDynamicsConfig.lif_vTh = vTh;
588  groupConfigMap[gGrpId].neuralDynamicsConfig.lif_vReset = vReset;
589  groupConfigMap[gGrpId].neuralDynamicsConfig.lif_minRmem = minRmem;
590  groupConfigMap[gGrpId].neuralDynamicsConfig.lif_maxRmem = maxRmem;
591  groupConfigMap[gGrpId].withParamModel_9 = 0;
592  groupConfigMap[gGrpId].isLIF = 1;
593  }
594 }
595 
596 void SNN::setNeuromodulator(int gGrpId, float baseDP, float tauDP, float base5HT, float tau5HT, float baseACh,
597  float tauACh, float baseNE, float tauNE) {
598 
599  assert(gGrpId >= -1);
600  assert(baseDP > 0.0f); assert(base5HT > 0.0f); assert(baseACh > 0.0f); assert(baseNE > 0.0f);
601  assert(tauDP > 0); assert(tau5HT > 0); assert(tauACh > 0); assert(tauNE > 0);
602 
603  if (gGrpId == ALL) { // shortcut for all groups
604  for (int grpId = 0; grpId < numGroups; grpId++) {
605  setNeuromodulator(grpId, baseDP, tauDP, base5HT, tau5HT, baseACh, tauACh, baseNE, tauNE);
606  }
607  } else {
608  groupConfigMap[gGrpId].neuromodulatorConfig.baseDP = baseDP;
609  groupConfigMap[gGrpId].neuromodulatorConfig.decayDP = 1.0f - (1.0f / tauDP);
610  groupConfigMap[gGrpId].neuromodulatorConfig.base5HT = base5HT;
611  groupConfigMap[gGrpId].neuromodulatorConfig.decay5HT = 1.0f - (1.0f / tau5HT);
612  groupConfigMap[gGrpId].neuromodulatorConfig.baseACh = baseACh;
613  groupConfigMap[gGrpId].neuromodulatorConfig.decayACh = 1.0f - (1.0f / tauACh);
614  groupConfigMap[gGrpId].neuromodulatorConfig.baseNE = baseNE;
615  groupConfigMap[gGrpId].neuromodulatorConfig.decayNE = 1.0f - (1.0f / tauNE);
616  }
617 }
618 
619 // set ESTDP params
620 void SNN::setESTDP(int gGrpId, bool isSet, STDPType type, STDPCurve curve, float alphaPlus, float tauPlus, float alphaMinus, float tauMinus, float gamma) {
621  assert(gGrpId >= -1);
622  if (isSet) {
623  assert(type!=UNKNOWN_STDP);
624  assert(tauPlus > 0.0f); assert(tauMinus > 0.0f); assert(gamma >= 0.0f);
625  }
626 
627  if (gGrpId == ALL) { // shortcut for all groups
628  for(int grpId = 0; grpId < numGroups; grpId++) {
629  setESTDP(grpId, isSet, type, curve, alphaPlus, tauPlus, alphaMinus, tauMinus, gamma);
630  }
631  } else {
632  // set STDP for a given group
633  // set params for STDP curve
634  groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_EXC = alphaPlus;
635  groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_EXC = alphaMinus;
636  groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_EXC = 1.0f / tauPlus;
637  groupConfigMap[gGrpId].stdpConfig.TAU_MINUS_INV_EXC = 1.0f / tauMinus;
638  groupConfigMap[gGrpId].stdpConfig.GAMMA = gamma;
639  groupConfigMap[gGrpId].stdpConfig.KAPPA = (1 + exp(-gamma / tauPlus)) / (1 - exp(-gamma / tauPlus));
640  groupConfigMap[gGrpId].stdpConfig.OMEGA = alphaPlus * (1 - groupConfigMap[gGrpId].stdpConfig.KAPPA);
641  // set flags for STDP function
642  groupConfigMap[gGrpId].stdpConfig.WithESTDPtype = type;
643  groupConfigMap[gGrpId].stdpConfig.WithESTDPcurve = curve;
644  groupConfigMap[gGrpId].stdpConfig.WithESTDP = isSet;
645  groupConfigMap[gGrpId].stdpConfig.WithSTDP |= groupConfigMap[gGrpId].stdpConfig.WithESTDP;
646  sim_with_stdp |= groupConfigMap[gGrpId].stdpConfig.WithSTDP;
647 
648  KERNEL_INFO("E-STDP %s for %s(%d)", isSet?"enabled":"disabled", groupConfigMap[gGrpId].grpName.c_str(), gGrpId);
649  }
650 }
651 
652 // set ISTDP params
653 void SNN::setISTDP(int gGrpId, bool isSet, STDPType type, STDPCurve curve, float ab1, float ab2, float tau1, float tau2) {
654  assert(gGrpId >= -1);
655  if (isSet) {
656  assert(type != UNKNOWN_STDP);
657  assert(tau1 > 0); assert(tau2 > 0);
658  }
659 
660  if (gGrpId==ALL) { // shortcut for all groups
661  for(int grpId = 0; grpId < numGroups; grpId++) {
662  setISTDP(grpId, isSet, type, curve, ab1, ab2, tau1, tau2);
663  }
664  } else {
665  // set STDP for a given group
666  // set params for STDP curve
667  if (curve == EXP_CURVE) {
668  groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_INB = ab1;
669  groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_INB = ab2;
670  groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_INB = 1.0f / tau1;
671  groupConfigMap[gGrpId].stdpConfig.TAU_MINUS_INV_INB = 1.0f / tau2;
672  groupConfigMap[gGrpId].stdpConfig.BETA_LTP = 0.0f;
673  groupConfigMap[gGrpId].stdpConfig.BETA_LTD = 0.0f;
674  groupConfigMap[gGrpId].stdpConfig.LAMBDA = 1.0f;
675  groupConfigMap[gGrpId].stdpConfig.DELTA = 1.0f;
676  } else {
677  groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_INB = 0.0f;
678  groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_INB = 0.0f;
679  groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_INB = 1.0f;
680  groupConfigMap[gGrpId].stdpConfig.TAU_MINUS_INV_INB = 1.0f;
681  groupConfigMap[gGrpId].stdpConfig.BETA_LTP = ab1;
682  groupConfigMap[gGrpId].stdpConfig.BETA_LTD = ab2;
683  groupConfigMap[gGrpId].stdpConfig.LAMBDA = tau1;
684  groupConfigMap[gGrpId].stdpConfig.DELTA = tau2;
685  }
686  // set flags for STDP function
687  //FIXME: separate STDPType to ESTDPType and ISTDPType
688  groupConfigMap[gGrpId].stdpConfig.WithISTDPtype = type;
689  groupConfigMap[gGrpId].stdpConfig.WithISTDPcurve = curve;
690  groupConfigMap[gGrpId].stdpConfig.WithISTDP = isSet;
691  groupConfigMap[gGrpId].stdpConfig.WithSTDP |= groupConfigMap[gGrpId].stdpConfig.WithISTDP;
692  sim_with_stdp |= groupConfigMap[gGrpId].stdpConfig.WithSTDP;
693 
694  KERNEL_INFO("I-STDP %s for %s(%d)", isSet?"enabled":"disabled", groupConfigMap[gGrpId].grpName.c_str(), gGrpId);
695  }
696 }
697 
698 // set STP params
699 void SNN::setSTP(int gGrpId, bool isSet, float STP_U, float STP_tau_u, float STP_tau_x) {
700  assert(gGrpId >= -1);
701  if (isSet) {
702  assert(STP_U > 0 && STP_U <= 1); assert(STP_tau_u > 0); assert(STP_tau_x > 0);
703  }
704 
705  if (gGrpId == ALL) { // shortcut for all groups
706  for(int grpId = 0; grpId < numGroups; grpId++) {
707  setSTP(grpId, isSet, STP_U, STP_tau_u, STP_tau_x);
708  }
709  } else {
710  // set STDP for a given group
711  sim_with_stp |= isSet;
712  groupConfigMap[gGrpId].stpConfig.WithSTP = isSet;
713  groupConfigMap[gGrpId].stpConfig.STP_A = (STP_U > 0.0f) ? 1.0 / STP_U : 1.0f; // scaling factor
714  groupConfigMap[gGrpId].stpConfig.STP_U = STP_U;
715  groupConfigMap[gGrpId].stpConfig.STP_tau_u_inv = 1.0f / STP_tau_u; // facilitatory
716  groupConfigMap[gGrpId].stpConfig.STP_tau_x_inv = 1.0f / STP_tau_x; // depressive
717 
718  KERNEL_INFO("STP %s for %d (%s):\tA: %1.4f, U: %1.4f, tau_u: %4.0f, tau_x: %4.0f", isSet?"enabled":"disabled",
719  gGrpId, groupConfigMap[gGrpId].grpName.c_str(), groupConfigMap[gGrpId].stpConfig.STP_A, STP_U, STP_tau_u, STP_tau_x);
720  }
721 }
722 
723 void SNN::setWeightAndWeightChangeUpdate(UpdateInterval wtANDwtChangeUpdateInterval, bool enableWtChangeDecay, float wtChangeDecay) {
724  assert(wtChangeDecay > 0.0f && wtChangeDecay < 1.0f);
725 
726  switch (wtANDwtChangeUpdateInterval) {
727  case INTERVAL_10MS:
728  wtANDwtChangeUpdateInterval_ = 10;
729  break;
730  case INTERVAL_100MS:
731  wtANDwtChangeUpdateInterval_ = 100;
732  break;
733  case INTERVAL_1000MS:
734  default:
735  wtANDwtChangeUpdateInterval_ = 1000;
736  break;
737  }
738 
739  if (enableWtChangeDecay) {
740  // set up stdp factor according to update interval
741  switch (wtANDwtChangeUpdateInterval) {
742  case INTERVAL_10MS:
743  stdpScaleFactor_ = 0.005f;
744  break;
745  case INTERVAL_100MS:
746  stdpScaleFactor_ = 0.05f;
747  break;
748  case INTERVAL_1000MS:
749  default:
750  stdpScaleFactor_ = 0.5f;
751  break;
752  }
753  // set up weight decay
754  wtChangeDecay_ = wtChangeDecay;
755  } else {
756  stdpScaleFactor_ = 1.0f;
757  wtChangeDecay_ = 0.0f;
758  }
759 
760  KERNEL_INFO("Update weight and weight change every %d ms", wtANDwtChangeUpdateInterval_);
761  KERNEL_INFO("Weight Change Decay is %s", enableWtChangeDecay? "enabled" : "disable");
762  KERNEL_INFO("STDP scale factor = %1.3f, wtChangeDecay = %1.3f", stdpScaleFactor_, wtChangeDecay_);
763 }
764 
768 
769 // reorganize the network and do the necessary allocation
770 // of all variable for carrying out the simulation..
771 // this code is run only one time during network initialization
773  switch (snnState) {
774  case CONFIG_SNN:
775  compileSNN();
776  case COMPILED_SNN:
777  partitionSNN();
778  case PARTITIONED_SNN:
779  generateRuntimeSNN();
780  break;
781  case EXECUTABLE_SNN:
782  break;
783  default:
784  KERNEL_ERROR("Unknown SNN state");
785  break;
786  }
787 }
788 
792 
/*!
 * \brief Runs the simulation for _nsec seconds plus _nmsec milliseconds.
 *
 * Advances the network one millisecond per advSimStep() iteration, updating
 * plastic weights every wtANDwtChangeUpdateInterval_ ms (skipped while
 * sim_in_testing) and doing once-per-second bookkeeping when updateTime()
 * reports a full second has elapsed.
 *
 * \param _nsec number of full seconds to run (>= 0)
 * \param _nmsec additional milliseconds to run (0..999)
 * \param printRunSummary print a per-run status summary (forced off in SILENT logger mode)
 * \return always 0
 */
int SNN::runNetwork(int _nsec, int _nmsec, bool printRunSummary) {
	assert(_nmsec >= 0 && _nmsec < 1000);
	assert(_nsec >= 0);
	int runDurationMs = _nsec*1000 + _nmsec;
	KERNEL_DEBUG("runNetwork: runDur=%dms, printRunSummary=%s", runDurationMs, printRunSummary?"y":"n");

	// setupNetwork() must have already been called
	assert(snnState == EXECUTABLE_SNN);

	// don't bother printing if logger mode is SILENT
	printRunSummary = (loggerMode_==SILENT) ? false : printRunSummary;

	// first-time run: inform the user the simulation is running now
	if (simTime==0 && printRunSummary) {
		KERNEL_INFO("");
		KERNEL_INFO("******************** Running the simulation on %d GPU(s) and %d CPU(s) ***************************", numGPUs, numCores);
		KERNEL_INFO("");
	}

	// reset all spike counters
	resetSpikeCnt(ALL);

	// store current start time for future reference
	simTimeRunStart = simTime;
	simTimeRunStop = simTime + runDurationMs;
	assert(simTimeRunStop >= simTimeRunStart); // check for arithmetic underflow

	// ConnectionMonitor is a special case: we might want the first snapshot at t=0 in the binary
	// but updateTime() is false for simTime==0.
	// And we cannot put this code in ConnectionMonitorCore::init, because then the user would have no
	// way to call ConnectionMonitor::setUpdateTimeIntervalSec before...
	if (simTime == 0 && numConnectionMonitor) {
		// NOTE(review): the t=0 snapshot call is missing from this excerpt
		// (likely lost in doc extraction) — verify against the repository source
	}

	// set the Poisson generation time slice to be at the run duration up to MAX_TIME_SLICE
	setGrpTimeSlice(ALL, std::max(1, std::min(runDurationMs, MAX_TIME_SLICE)));

#ifndef __NO_CUDA__
	CUDA_RESET_TIMER(timer);
	CUDA_START_TIMER(timer);
#endif

	//KERNEL_INFO("Reached the advSimStep loop!");

	// if nsec=0, simTimeMs=10, we need to run the simulator for 10 timeStep;
	// if nsec=1, simTimeMs=10, we need to run the simulator for 1*1000+10, time Step;
	for(int i = 0; i < runDurationMs; i++) {
		advSimStep();
		//KERNEL_INFO("Executed an advSimStep!");

		// update weight every updateInterval ms if plastic synapses present
		if (!sim_with_fixedwts && wtANDwtChangeUpdateInterval_ == ++wtANDwtChangeUpdateIntervalCnt_) {
			wtANDwtChangeUpdateIntervalCnt_ = 0; // reset counter
			if (!sim_in_testing) {
				// keep this if statement separate from the above, so that the counter is updated correctly
				updateWeights();
			}
		}

		// Note: updateTime() advance simTime, simTimeMs, and simTimeSec accordingly
		if (updateTime()) {
			// finished one sec of simulation...
			// NOTE(review): the four monitor-update calls below are missing from
			// this excerpt (likely lost in doc extraction) — verify upstream
			if (numSpikeMonitor) {
			}
			if (numGroupMonitor) {
			}
			if (numConnectionMonitor) {
			}
			if (numNeuronMonitor) {
			}

			shiftSpikeTables();
		}

		fetchNeuronSpikeCount(ALL);
	}

	//KERNEL_INFO("Updated monitors!");

	// user can opt to display some runNetwork summary
	if (printRunSummary) {

		// if there are Monitors available and it's time to show the log, print status for each group
		if (numSpikeMonitor) {
			printStatusSpikeMonitor(ALL);
		}
		if (numConnectionMonitor) {
			printStatusConnectionMonitor(ALL);
		}
		if (numGroupMonitor) {
			printStatusGroupMonitor(ALL);
		}

		// record time of run summary print
		simTimeLastRunSummary = simTime;
	}

	// call updateSpike(Group)Monitor again to fetch all the left-over spikes and group status (neuromodulator)

	// keep track of simulation time...
#ifndef __NO_CUDA__
	CUDA_STOP_TIMER(timer);
	lastExecutionTime = CUDA_GET_TIMER_VALUE(timer);
	cumExecutionTime += lastExecutionTime;
#endif
	return 0;
}
907 
908 
909 
913 
914 // adds a bias to every weight in the connection
/*!
 * \brief Adds \p bias to every synaptic weight belonging to connection \p connId.
 *
 * Weights are modified in the manager's host-side copy and then written back,
 * batched per postsynaptic neuron, to whichever runtime (GPU or CPU) owns the
 * connection's destination group.
 *
 * \param connId connection ID (0 <= connId < numConnections)
 * \param bias value added to each weight (may be negative)
 * \param updateWeightRange if true, grow the connection's maxWt so the biased
 *        weights stay in range; if false, clamp each weight to [0, maxWt]
 */
void SNN::biasWeights(short int connId, float bias, bool updateWeightRange) {
	assert(connId>=0 && connId<numConnections);

	// runtime partition and local group ID of the connection's destination group
	int netId = groupConfigMDMap[connectConfigMap[connId].grpDest].netId;
	int lGrpId = groupConfigMDMap[connectConfigMap[connId].grpDest].lGrpId;

	// pull current synapse state from the runtime into managerRuntimeData
	fetchPreConnectionInfo(netId);
	fetchConnIdsLookupArray(netId);
	fetchSynapseState(netId);
	// iterate over all postsynaptic neurons
	for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++) {
		// start of this neuron's presynaptic synapse list in the flat arrays
		unsigned int cumIdx = managerRuntimeData.cumulativePre[lNId];

		// iterate over all presynaptic neurons
		unsigned int pos_ij = cumIdx;
		for (int j = 0; j < managerRuntimeData.Npre[lNId]; pos_ij++, j++) {
			// only touch synapses that belong to the requested connection
			if (managerRuntimeData.connIdsPreIdx[pos_ij] == connId) {
				// apply bias to weight
				float weight = managerRuntimeData.wt[pos_ij] + bias;

				// inform user of action taken if weight is out of bounds
//				bool needToPrintDebug = (weight+bias>connInfo->maxWt || weight+bias<connInfo->minWt);
				bool needToPrintDebug = (weight > connectConfigMap[connId].maxWt || weight < 0.0f);

				if (updateWeightRange) {
					// if this flag is set, we need to update minWt,maxWt accordingly
					// will be saving new maxSynWt and copying to GPU below
//					connInfo->minWt = fmin(connInfo->minWt, weight);
					connectConfigMap[connId].maxWt = std::max(connectConfigMap[connId].maxWt, weight);
					if (needToPrintDebug) {
						KERNEL_DEBUG("biasWeights(%d,%f,%s): updated weight ranges to [%f,%f]", connId, bias,
							(updateWeightRange?"true":"false"), 0.0f, connectConfigMap[connId].maxWt);
					}
				} else {
					// constrain weight to boundary values
					// compared to above, we swap minWt/maxWt logic
					weight = std::min(weight, connectConfigMap[connId].maxWt);
//					weight = fmax(weight, connInfo->minWt);
					weight = std::max(weight, 0.0f);
					if (needToPrintDebug) {
						KERNEL_DEBUG("biasWeights(%d,%f,%s): constrained weight %f to [%f,%f]", connId, bias,
							(updateWeightRange?"true":"false"), weight, 0.0f, connectConfigMap[connId].maxWt);
					}
				}

				// update datastructures
				managerRuntimeData.wt[pos_ij] = weight;
				managerRuntimeData.maxSynWt[pos_ij] = connectConfigMap[connId].maxWt; // it's easier to just update, even if it hasn't changed
			}
		}

		// update GPU datastructures in batches, grouped by post-neuron
		if (netId < CPU_RUNTIME_BASE) {
#ifndef __NO_CUDA__
			CUDA_CHECK_ERRORS( cudaMemcpy(&(runtimeData[netId].wt[cumIdx]), &(managerRuntimeData.wt[cumIdx]), sizeof(float)*managerRuntimeData.Npre[lNId],
				cudaMemcpyHostToDevice) );

			if (runtimeData[netId].maxSynWt != NULL) {
				// only copy maxSynWt if datastructure actually exists on the GPU runtime
				// (that logic should be done elsewhere though)
				CUDA_CHECK_ERRORS( cudaMemcpy(&(runtimeData[netId].maxSynWt[cumIdx]), &(managerRuntimeData.maxSynWt[cumIdx]),
					sizeof(float) * managerRuntimeData.Npre[lNId], cudaMemcpyHostToDevice) );
			}
#else
			// a GPU-partition netId without CUDA support is a configuration error
			assert(false);
#endif
		} else {
			memcpy(&runtimeData[netId].wt[cumIdx], &managerRuntimeData.wt[cumIdx], sizeof(float) * managerRuntimeData.Npre[lNId]);

			if (runtimeData[netId].maxSynWt != NULL) {
				// only copy maxSynWt if datastructure actually exists on the CPU runtime
				// (that logic should be done elsewhere though)
				memcpy(&runtimeData[netId].maxSynWt[cumIdx], &managerRuntimeData.maxSynWt[cumIdx], sizeof(float) * managerRuntimeData.Npre[lNId]);
			}
		}
	}
}
992 
993 // deallocates dynamical structures and exits
// deallocates dynamical structures and exits
// \param val process exit code passed straight to exit()
void SNN::exitSimulation(int val) {
	deleteObjects(); // release all dynamically allocated simulator objects first
	exit(val); // terminate the whole process; this function does not return
}
998 
999 // reads network state from file
// reads network state from file
// NOTE: this only records the FILE pointer in loadSimFID; the actual parsing
// happens elsewhere, so the handle must remain open/valid until then
void SNN::loadSimulation(FILE* fid) {
	loadSimFID = fid;
}
1003 
1004 // multiplies every weight with a scaling factor
1005 void SNN::scaleWeights(short int connId, float scale, bool updateWeightRange) {
1006  assert(connId>=0 && connId<numConnections);
1007  assert(scale>=0.0f);
1008 
1009  int netId = groupConfigMDMap[connectConfigMap[connId].grpDest].netId;
1010  int lGrpId = groupConfigMDMap[connectConfigMap[connId].grpDest].lGrpId;
1011 
1012  fetchPreConnectionInfo(netId);
1013  fetchConnIdsLookupArray(netId);
1014  fetchSynapseState(netId);
1015 
1016  // iterate over all postsynaptic neurons
1017  for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++) {
1018  unsigned int cumIdx = managerRuntimeData.cumulativePre[lNId];
1019 
1020  // iterate over all presynaptic neurons
1021  unsigned int pos_ij = cumIdx;
1022  for (int j = 0; j < managerRuntimeData.Npre[lNId]; pos_ij++, j++) {
1023  if (managerRuntimeData.connIdsPreIdx[pos_ij]==connId) {
1024  // apply bias to weight
1025  float weight = managerRuntimeData.wt[pos_ij] * scale;
1026 
1027  // inform user of acton taken if weight is out of bounds
1028 // bool needToPrintDebug = (weight>connInfo->maxWt || weight<connInfo->minWt);
1029  bool needToPrintDebug = (weight > connectConfigMap[connId].maxWt || weight < 0.0f);
1030 
1031  if (updateWeightRange) {
1032  // if this flag is set, we need to update minWt,maxWt accordingly
1033  // will be saving new maxSynWt and copying to GPU below
1034 // connInfo->minWt = fmin(connInfo->minWt, weight);
1035  connectConfigMap[connId].maxWt = std::max(connectConfigMap[connId].maxWt, weight);
1036  if (needToPrintDebug) {
1037  KERNEL_DEBUG("scaleWeights(%d,%f,%s): updated weight ranges to [%f,%f]", connId, scale,
1038  (updateWeightRange?"true":"false"), 0.0f, connectConfigMap[connId].maxWt);
1039  }
1040  } else {
1041  // constrain weight to boundary values
1042  // compared to above, we swap minWt/maxWt logic
1043  weight = std::min(weight, connectConfigMap[connId].maxWt);
1044 // weight = fmax(weight, connInfo->minWt);
1045  weight = std::max(weight, 0.0f);
1046  if (needToPrintDebug) {
1047  KERNEL_DEBUG("scaleWeights(%d,%f,%s): constrained weight %f to [%f,%f]", connId, scale,
1048  (updateWeightRange?"true":"false"), weight, 0.0f, connectConfigMap[connId].maxWt);
1049  }
1050  }
1051 
1052  // update datastructures
1053  managerRuntimeData.wt[pos_ij] = weight;
1054  managerRuntimeData.maxSynWt[pos_ij] = connectConfigMap[connId].maxWt; // it's easier to just update, even if it hasn't changed
1055  }
1056  }
1057 
1058  // update GPU datastructures in batches, grouped by post-neuron
1059  if (netId < CPU_RUNTIME_BASE) {
1060 #ifndef __NO_CUDA__
1061  CUDA_CHECK_ERRORS(cudaMemcpy(&runtimeData[netId].wt[cumIdx], &managerRuntimeData.wt[cumIdx], sizeof(float)*managerRuntimeData.Npre[lNId],
1062  cudaMemcpyHostToDevice));
1063 
1064  if (runtimeData[netId].maxSynWt != NULL) {
1065  // only copy maxSynWt if datastructure actually exists on the GPU runtime
1066  // (that logic should be done elsewhere though)
1067  CUDA_CHECK_ERRORS(cudaMemcpy(&runtimeData[netId].maxSynWt[cumIdx], &managerRuntimeData.maxSynWt[cumIdx],
1068  sizeof(float) * managerRuntimeData.Npre[lNId], cudaMemcpyHostToDevice));
1069  }
1070 #else
1071  assert(false);
1072 #endif
1073  } else {
1074  memcpy(&runtimeData[netId].wt[cumIdx], &managerRuntimeData.wt[cumIdx], sizeof(float) * managerRuntimeData.Npre[lNId]);
1075 
1076  if (runtimeData[netId].maxSynWt != NULL) {
1077  // only copy maxSynWt if datastructure actually exists on the CPU runtime
1078  // (that logic should be done elsewhere though)
1079  memcpy(&runtimeData[netId].maxSynWt[cumIdx], &managerRuntimeData.maxSynWt[cumIdx], sizeof(float) * managerRuntimeData.Npre[lNId]);
1080  }
1081  }
1082  }
1083 }
1084 
1085 // FIXME: distinguish the function call at CONFIG_STATE and SETUP_STATE, where groupConfigs[0][] might not be available
1086 // or groupConfigMap is not sync with groupConfigs[0][]
1087 GroupMonitor* SNN::setGroupMonitor(int gGrpId, FILE* fid) {
1088  int netId = groupConfigMDMap[gGrpId].netId;
1089  int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
1090 
1091  // check whether group already has a GroupMonitor
1092  if (groupConfigMDMap[gGrpId].groupMonitorId >= 0) {
1093  KERNEL_ERROR("setGroupMonitor has already been called on Group %d (%s).", gGrpId, groupConfigMap[gGrpId].grpName.c_str());
1094  exitSimulation(1);
1095  }
1096 
1097  // create new GroupMonitorCore object in any case and initialize analysis components
1098  // grpMonObj destructor (see below) will deallocate it
1099  GroupMonitorCore* grpMonCoreObj = new GroupMonitorCore(this, numGroupMonitor, gGrpId);
1100  groupMonCoreList[numGroupMonitor] = grpMonCoreObj;
1101 
1102  // assign group status file ID if we selected to write to a file, else it's NULL
1103  // if file pointer exists, it has already been fopened
1104  // this will also write the header section of the group status file
1105  // grpMonCoreObj destructor will fclose it
1106  grpMonCoreObj->setGroupFileId(fid);
1107 
1108  // create a new GroupMonitor object for the user-interface
1109  // SNN::deleteObjects will deallocate it
1110  GroupMonitor* grpMonObj = new GroupMonitor(grpMonCoreObj);
1111  groupMonList[numGroupMonitor] = grpMonObj;
1112 
1113  // also inform the group that it is being monitored...
1114  groupConfigMDMap[gGrpId].groupMonitorId = numGroupMonitor;
1115 
1116  numGroupMonitor++;
1117  KERNEL_INFO("GroupMonitor set for group %d (%s)", gGrpId, groupConfigMap[gGrpId].grpName.c_str());
1118 
1119  return grpMonObj;
1120 }
1121 
1122 // FIXME: distinguish the function call at CONFIG_STATE and SETUP_STATE, where group(connect)Config[] might not be available
1123 // or group(connect)ConfigMap is not sync with group(connect)Config[]
1124 ConnectionMonitor* SNN::setConnectionMonitor(int grpIdPre, int grpIdPost, FILE* fid) {
1125  // find connection based on pre-post pair
1126  short int connId = getConnectId(grpIdPre, grpIdPost);
1127  if (connId<0) {
1128  KERNEL_ERROR("No connection found from group %d(%s) to group %d(%s)", grpIdPre, getGroupName(grpIdPre).c_str(),
1129  grpIdPost, getGroupName(grpIdPost).c_str());
1130  exitSimulation(1);
1131  }
1132 
1133  // check whether connection already has a connection monitor
1134  if (connectConfigMap[connId].connectionMonitorId >= 0) {
1135  KERNEL_ERROR("setConnectionMonitor has already been called on Connection %d (MonitorId=%d)", connId, connectConfigMap[connId].connectionMonitorId);
1136  exitSimulation(1);
1137  }
1138 
1139  // inform the connection that it is being monitored...
1140  // this needs to be called before new ConnectionMonitorCore
1141  connectConfigMap[connId].connectionMonitorId = numConnectionMonitor;
1142 
1143  // create new ConnectionMonitorCore object in any case and initialize
1144  // connMonObj destructor (see below) will deallocate it
1145  ConnectionMonitorCore* connMonCoreObj = new ConnectionMonitorCore(this, numConnectionMonitor, connId,
1146  grpIdPre, grpIdPost);
1147  connMonCoreList[numConnectionMonitor] = connMonCoreObj;
1148 
1149  // assign conn file ID if we selected to write to a file, else it's NULL
1150  // if file pointer exists, it has already been fopened
1151  // this will also write the header section of the conn file
1152  // connMonCoreObj destructor will fclose it
1153  connMonCoreObj->setConnectFileId(fid);
1154 
1155  // create a new ConnectionMonitor object for the user-interface
1156  // SNN::deleteObjects will deallocate it
1157  ConnectionMonitor* connMonObj = new ConnectionMonitor(connMonCoreObj);
1158  connMonList[numConnectionMonitor] = connMonObj;
1159 
1160  // now init core object (depends on several datastructures allocated above)
1161  connMonCoreObj->init();
1162 
1163  numConnectionMonitor++;
1164  KERNEL_INFO("ConnectionMonitor %d set for Connection %d: %d(%s) => %d(%s)", connectConfigMap[connId].connectionMonitorId, connId, grpIdPre, getGroupName(grpIdPre).c_str(),
1165  grpIdPost, getGroupName(grpIdPost).c_str());
1166 
1167  return connMonObj;
1168 }
1169 
1170 // FIXME: distinguish the function call at CONFIG_STATE and SETUP_STATE, where groupConfigs[0][] might not be available
1171 // or groupConfigMap is not sync with groupConfigs[0][]
1172 // sets up a spike generator
// registers a user-supplied spike-generation callback for a spike-generator group
// \param gGrpId global group ID; must be a spike-generator group
// \param spikeGenFunc non-NULL callback object stored in the group's configuration
void SNN::setSpikeGenerator(int gGrpId, SpikeGeneratorCore* spikeGenFunc) {
	assert(snnState == CONFIG_SNN); // must be called before setupNetwork() to work on GPU
	assert(spikeGenFunc); // callback must be non-NULL
	assert(groupConfigMap[gGrpId].isSpikeGenerator); // only spike-generator groups accept a callback
	groupConfigMap[gGrpId].spikeGenFunc = spikeGenFunc; // store the callback in the group's configuration
}
1179 
1180 // record spike information, return a SpikeInfo object
1181 SpikeMonitor* SNN::setSpikeMonitor(int gGrpId, FILE* fid) {
1182  // check whether group already has a SpikeMonitor
1183  if (groupConfigMDMap[gGrpId].spikeMonitorId >= 0) {
1184  // in this case, return the current object and update fid
1185  SpikeMonitor* spkMonObj = getSpikeMonitor(gGrpId);
1186 
1187  // update spike file ID
1188  SpikeMonitorCore* spkMonCoreObj = getSpikeMonitorCore(gGrpId);
1189  spkMonCoreObj->setSpikeFileId(fid);
1190 
1191  KERNEL_INFO("SpikeMonitor updated for group %d (%s)", gGrpId, groupConfigMap[gGrpId].grpName.c_str());
1192  return spkMonObj;
1193  } else {
1194  // create new SpikeMonitorCore object in any case and initialize analysis components
1195  // spkMonObj destructor (see below) will deallocate it
1196  SpikeMonitorCore* spkMonCoreObj = new SpikeMonitorCore(this, numSpikeMonitor, gGrpId);
1197  spikeMonCoreList[numSpikeMonitor] = spkMonCoreObj;
1198 
1199  // assign spike file ID if we selected to write to a file, else it's NULL
1200  // if file pointer exists, it has already been fopened
1201  // this will also write the header section of the spike file
1202  // spkMonCoreObj destructor will fclose it
1203  spkMonCoreObj->setSpikeFileId(fid);
1204 
1205  // create a new SpikeMonitor object for the user-interface
1206  // SNN::deleteObjects will deallocate it
1207  SpikeMonitor* spkMonObj = new SpikeMonitor(spkMonCoreObj);
1208  spikeMonList[numSpikeMonitor] = spkMonObj;
1209 
1210  // also inform the grp that it is being monitored...
1211  groupConfigMDMap[gGrpId].spikeMonitorId = numSpikeMonitor;
1212 
1213  numSpikeMonitor++;
1214  KERNEL_INFO("SpikeMonitor set for group %d (%s)", gGrpId, groupConfigMap[gGrpId].grpName.c_str());
1215 
1216  return spkMonObj;
1217  }
1218 }
1219 
1220 // record neuron state information, return a NeuronInfo object
1221 NeuronMonitor* SNN::setNeuronMonitor(int gGrpId, FILE* fid) {
1222  int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
1223  int netId = groupConfigMDMap[gGrpId].netId;
1224 
1225  if (getGroupNumNeurons(gGrpId) > 128) {
1226  KERNEL_WARN("Due to limited memory space, only the first 128 neurons can be monitored by NeuronMonitor");
1227  }
1228 
1229  // check whether group already has a NeuronMonitor
1230  if (groupConfigMDMap[gGrpId].neuronMonitorId >= 0) {
1231  // in this case, return the current object and update fid
1232  NeuronMonitor* nrnMonObj = getNeuronMonitor(gGrpId);
1233 
1234  // update neuron file ID
1235  NeuronMonitorCore* nrnMonCoreObj = getNeuronMonitorCore(gGrpId);
1236  nrnMonCoreObj->setNeuronFileId(fid);
1237 
1238  KERNEL_INFO("NeuronMonitor updated for group %d (%s)", gGrpId, groupConfigMap[gGrpId].grpName.c_str());
1239  return nrnMonObj;
1240  } else {
1241  // create new NeuronMonitorCore object in any case and initialize analysis components
1242  // nrnMonObj destructor (see below) will deallocate it
1243  NeuronMonitorCore* nrnMonCoreObj = new NeuronMonitorCore(this, numNeuronMonitor, gGrpId);
1244  neuronMonCoreList[numNeuronMonitor] = nrnMonCoreObj;
1245 
1246  // assign neuron state file ID if we selected to write to a file, else it's NULL
1247  // if file pointer exists, it has already been fopened
1248  // this will also write the header section of the spike file
1249  // spkMonCoreObj destructor will fclose it
1250  nrnMonCoreObj->setNeuronFileId(fid);
1251 
1252  // create a new NeuronMonitor object for the user-interface
1253  // SNN::deleteObjects will deallocate it
1254  NeuronMonitor* nrnMonObj = new NeuronMonitor(nrnMonCoreObj);
1255  neuronMonList[numNeuronMonitor] = nrnMonObj;
1256 
1257  // also inform the grp that it is being monitored...
1258  groupConfigMDMap[gGrpId].neuronMonitorId = numNeuronMonitor;
1259 
1260  numNeuronMonitor++;
1261  KERNEL_INFO("NeuronMonitor set for group %d (%s)", gGrpId, groupConfigMap[gGrpId].grpName.c_str());
1262 
1263  return nrnMonObj;
1264  }
1265 }
1266 
1267 // FIXME: distinguish the function call at CONFIG_STATE and RUN_STATE, where groupConfigs[0][] might not be available
1268 // or groupConfigMap is not sync with groupConfigs[0][]
1269 // assigns spike rate to group
1270 void SNN::setSpikeRate(int gGrpId, PoissonRate* ratePtr, int refPeriod) {
1271  int netId = groupConfigMDMap[gGrpId].netId;
1272  int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
1273 
1274  assert(gGrpId >= 0 && lGrpId < networkConfigs[netId].numGroups);
1275  assert(ratePtr);
1276  assert(groupConfigMap[gGrpId].isSpikeGenerator);
1277  assert(ratePtr->getNumNeurons() == groupConfigMap[gGrpId].numN);
1278  assert(refPeriod >= 1);
1279 
1280  groupConfigMDMap[gGrpId].ratePtr = ratePtr;
1281  groupConfigMDMap[gGrpId].refractPeriod = refPeriod;
1282  spikeRateUpdated = true;
1283 }
1284 
1285 // sets the weight value of a specific synapse
1286 void SNN::setWeight(short int connId, int neurIdPre, int neurIdPost, float weight, bool updateWeightRange) {
1287  assert(connId>=0 && connId<getNumConnections());
1288  assert(weight>=0.0f);
1289 
1290  assert(neurIdPre >= 0 && neurIdPre < getGroupNumNeurons(connectConfigMap[connId].grpSrc));
1291  assert(neurIdPost >= 0 && neurIdPost < getGroupNumNeurons(connectConfigMap[connId].grpDest));
1292 
1293  float maxWt = fabs(connectConfigMap[connId].maxWt);
1294  float minWt = 0.0f;
1295 
1296  // inform user of acton taken if weight is out of bounds
1297  bool needToPrintDebug = (weight>maxWt || weight<minWt);
1298 
1299  int netId = groupConfigMDMap[connectConfigMap[connId].grpDest].netId;
1300  int postlGrpId = groupConfigMDMap[connectConfigMap[connId].grpDest].lGrpId;
1301  int prelGrpId = groupConfigMDMap[connectConfigMap[connId].grpSrc].lGrpId;
1302 
1303  fetchPreConnectionInfo(netId);
1304  fetchConnIdsLookupArray(netId);
1305  fetchSynapseState(netId);
1306 
1307  if (updateWeightRange) {
1308  // if this flag is set, we need to update minWt,maxWt accordingly
1309  // will be saving new maxSynWt and copying to GPU below
1310 // connInfo->minWt = fmin(connInfo->minWt, weight);
1311  maxWt = fmax(maxWt, weight);
1312  if (needToPrintDebug) {
1313  KERNEL_DEBUG("setWeight(%d,%d,%d,%f,%s): updated weight ranges to [%f,%f]", connId, neurIdPre, neurIdPost,
1314  weight, (updateWeightRange?"true":"false"), minWt, maxWt);
1315  }
1316  } else {
1317  // constrain weight to boundary values
1318  // compared to above, we swap minWt/maxWt logic
1319  weight = fmin(weight, maxWt);
1320  weight = fmax(weight, minWt);
1321  if (needToPrintDebug) {
1322  KERNEL_DEBUG("setWeight(%d,%d,%d,%f,%s): constrained weight %f to [%f,%f]", connId, neurIdPre, neurIdPost,
1323  weight, (updateWeightRange?"true":"false"), weight, minWt, maxWt);
1324  }
1325  }
1326 
1327  // find real ID of pre- and post-neuron
1328  int neurIdPreReal = groupConfigs[netId][prelGrpId].lStartN + neurIdPre;
1329  int neurIdPostReal = groupConfigs[netId][postlGrpId].lStartN + neurIdPost;
1330 
1331  // iterate over all presynaptic synapses until right one is found
1332  bool synapseFound = false;
1333  int pos_ij = managerRuntimeData.cumulativePre[neurIdPostReal];
1334  for (int j = 0; j < managerRuntimeData.Npre[neurIdPostReal]; pos_ij++, j++) {
1335  SynInfo* preId = &(managerRuntimeData.preSynapticIds[pos_ij]);
1336  int pre_nid = GET_CONN_NEURON_ID((*preId));
1337  if (GET_CONN_NEURON_ID((*preId)) == neurIdPreReal) {
1338  assert(managerRuntimeData.connIdsPreIdx[pos_ij] == connId); // make sure we've got the right connection ID
1339 
1340  managerRuntimeData.wt[pos_ij] = isExcitatoryGroup(connectConfigMap[connId].grpSrc) ? weight : -1.0 * weight;
1341  managerRuntimeData.maxSynWt[pos_ij] = isExcitatoryGroup(connectConfigMap[connId].grpSrc) ? maxWt : -1.0 * maxWt;
1342 
1343  if (netId < CPU_RUNTIME_BASE) {
1344 #ifndef __NO_CUDA__
1345  // need to update datastructures on GPU runtime
1346  CUDA_CHECK_ERRORS(cudaMemcpy(&runtimeData[netId].wt[pos_ij], &managerRuntimeData.wt[pos_ij], sizeof(float), cudaMemcpyHostToDevice));
1347  if (runtimeData[netId].maxSynWt != NULL) {
1348  // only copy maxSynWt if datastructure actually exists on the GPU runtime
1349  // (that logic should be done elsewhere though)
1350  CUDA_CHECK_ERRORS(cudaMemcpy(&runtimeData[netId].maxSynWt[pos_ij], &managerRuntimeData.maxSynWt[pos_ij], sizeof(float), cudaMemcpyHostToDevice));
1351  }
1352 #else
1353  assert(false);
1354 #endif
1355  } else {
1356  // need to update datastructures on CPU runtime
1357  memcpy(&runtimeData[netId].wt[pos_ij], &managerRuntimeData.wt[pos_ij], sizeof(float));
1358  if (runtimeData[netId].maxSynWt != NULL) {
1359  // only copy maxSynWt if datastructure actually exists on the CPU runtime
1360  // (that logic should be done elsewhere though)
1361  memcpy(&runtimeData[netId].maxSynWt[pos_ij], &managerRuntimeData.maxSynWt[pos_ij], sizeof(float));
1362  }
1363  }
1364 
1365  // synapse found and updated: we're done!
1366  synapseFound = true;
1367  break;
1368  }
1369  }
1370 
1371  if (!synapseFound) {
1372  KERNEL_WARN("setWeight(%d,%d,%d,%f,%s): Synapse does not exist, not updated.", connId, neurIdPre, neurIdPost,
1373  weight, (updateWeightRange?"true":"false"));
1374  }
1375 }
1376 
1377 void SNN::setExternalCurrent(int grpId, const std::vector<float>& current) {
1378  assert(grpId >= 0); assert(grpId < numGroups);
1379  assert(!isPoissonGroup(grpId));
1380  assert(current.size() == getGroupNumNeurons(grpId));
1381 
1382  int netId = groupConfigMDMap[grpId].netId;
1383  int lGrpId = groupConfigMDMap[grpId].lGrpId;
1384 
1385  // // update flag for faster handling at run-time
1386  // if (count_if(current.begin(), current.end(), isGreaterThanZero)) {
1387  // groupConfigs[0][grpId].WithCurrentInjection = true;
1388  // } else {
1389  // groupConfigs[0][grpId].WithCurrentInjection = false;
1390  // }
1391 
1392  // store external current in array
1393  for (int lNId = groupConfigs[netId][lGrpId].lStartN, j = 0; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++, j++) {
1394  managerRuntimeData.extCurrent[lNId] = current[j];
1395  }
1396 
1397  // copy to GPU if necessary
1398  // don't allocate; allocation done in generateRuntimeData
1399  if (netId < CPU_RUNTIME_BASE) {
1400  copyExternalCurrent(netId, lGrpId, &runtimeData[netId], cudaMemcpyHostToDevice, false);
1401  }
1402  else {
1403  copyExternalCurrent(netId, lGrpId, &runtimeData[netId], false);
1404  }
1405 }
1406 
1407 // writes network state to file
1408 // handling of file pointer should be handled externally: as far as this function is concerned, it is simply
1409 // trying to write to file
1410 void SNN::saveSimulation(FILE* fid, bool saveSynapseInfo) {
1411  int tmpInt;
1412  float tmpFloat;
1413 
1415 
1417  tmpInt = 294338571; // some int used to identify saveSimulation files
1418  if (!fwrite(&tmpInt,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1419 
1421  tmpFloat = 0.3f;
1422  if (!fwrite(&tmpFloat,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1423 
1425  tmpFloat = ((float)simTimeSec) + ((float)simTimeMs)/1000.0f;
1426  if (!fwrite(&tmpFloat,sizeof(float),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1427 
1429  stopTiming();
1430  tmpFloat = executionTime/1000.0f;
1431  if (!fwrite(&tmpFloat,sizeof(float),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1432 
1434 
1436  if (!fwrite(&glbNetworkConfig.numN,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1437  int dummyInt = 0;
1438  //if (!fwrite(&numPreSynNet,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1439  // if (!fwrite(&dummyInt,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1440  //if (!fwrite(&numPostSynNet,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1441  // if (!fwrite(&dummyInt,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1442  if (!fwrite(&numGroups,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1443 
1445  char name[100];
1446  for (int gGrpId=0;gGrpId<numGroups;gGrpId++) {
1447  if (!fwrite(&groupConfigMDMap[gGrpId].gStartN,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1448  if (!fwrite(&groupConfigMDMap[gGrpId].gEndN,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1449 
1450  if (!fwrite(&groupConfigMap[gGrpId].grid.numX,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1451  if (!fwrite(&groupConfigMap[gGrpId].grid.numY,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1452  if (!fwrite(&groupConfigMap[gGrpId].grid.numZ,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1453 
1454  strncpy(name,groupConfigMap[gGrpId].grpName.c_str(),100);
1455  if (!fwrite(name,1,100,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1456  }
1457 
1458  if (!saveSynapseInfo) return;
1459 
1460  // Save number of local networks
1461  int net_count = 0;
1462  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
1463  if (!groupPartitionLists[netId].empty()) {
1464  net_count++;
1465  }
1466  }
1467  if (!fwrite(&net_count, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1468 
1469  // Save weights for each local network
1470  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
1471  if (!groupPartitionLists[netId].empty()) {
1472  // copy from runtimeData to managerRuntimeData
1473  fetchPreConnectionInfo(netId);
1474  fetchPostConnectionInfo(netId);
1475  fetchConnIdsLookupArray(netId);
1476  fetchSynapseState(netId);
1477 
1478  // save number of synapses that starting from local groups
1479  int numSynToSave = 0;
1480  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
1481  if (grpIt->netId == netId) {
1482  numSynToSave += grpIt->numPostSynapses;
1483  }
1484  }
1485  if (!fwrite(&numSynToSave, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1486  // read synapse info from managerRuntimData
1487  int numSynSaved = 0;
1488  for (int lNId = 0; lNId < networkConfigs[netId].numNAssigned; lNId++) {
1489  unsigned int offset = managerRuntimeData.cumulativePost[lNId];
1490 
1491  // save each synapse starting from from neuron lNId
1492  for (int t = 0; t < glbNetworkConfig.maxDelay; t++) {
1493  DelayInfo dPar = managerRuntimeData.postDelayInfo[lNId*(glbNetworkConfig.maxDelay + 1)+t];
1494 
1495  for (int idx_d=dPar.delay_index_start; idx_d < (dPar.delay_index_start + dPar.delay_length); idx_d++) {
1496  SynInfo post_info = managerRuntimeData.postSynapticIds[offset + idx_d];
1497  int lNIdPost = GET_CONN_NEURON_ID(post_info);
1498  int lGrpIdPost = GET_CONN_GRP_ID(post_info);
1499  int preSynId = GET_CONN_SYN_ID(post_info);
1500  int pre_pos = managerRuntimeData.cumulativePre[lNIdPost] + preSynId;
1501  SynInfo pre_info = managerRuntimeData.preSynapticIds[pre_pos];
1502  int lNIdPre = GET_CONN_NEURON_ID(pre_info);
1503  int lGrpIdPre = GET_CONN_GRP_ID(pre_info);
1504  float weight = managerRuntimeData.wt[pre_pos];
1505  float maxWeight = managerRuntimeData.maxSynWt[pre_pos];
1506  int connId = managerRuntimeData.connIdsPreIdx[pre_pos];
1507  int delay = t+1;
1508 
1509  // convert local group id to global group id
1510  // convert local neuron id to neuron order in group
1511  int gGrpIdPre = groupConfigs[netId][lGrpIdPre].gGrpId;
1512  int gGrpIdPost = groupConfigs[netId][lGrpIdPost].gGrpId;
1513  int grpNIdPre = lNId - groupConfigs[netId][lGrpIdPre].lStartN;
1514  int grpNIdPost = lNIdPost - groupConfigs[netId][lGrpIdPost].lStartN;
1515 
1516  // we only save synapses starting from local groups since otherwise we will save external synapses twice
1517  // write order is based on function connectNeurons (no NetId & external_NetId)
1518  // inline void SNN::connectNeurons(int netId, int _grpSrc, int _grpDest, int _nSrc, int _nDest, short int _connId, float initWt, float maxWt, uint8_t delay, int externalNetId)
1519  if (groupConfigMDMap[gGrpIdPre].netId == netId) {
1520  numSynSaved++;
1521  if (!fwrite(&gGrpIdPre, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1522  if (!fwrite(&gGrpIdPost, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1523  if (!fwrite(&grpNIdPre, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1524  if (!fwrite(&grpNIdPost, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1525  if (!fwrite(&connId, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1526  if (!fwrite(&weight, sizeof(float), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1527  if (!fwrite(&maxWeight, sizeof(float), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1528  if (!fwrite(&delay, sizeof(int), 1, fid)) KERNEL_ERROR("saveSimulation fwrite error");
1529  }
1530  }
1531  }
1532  }
1533  assert(numSynSaved == numSynToSave);
1534  }
1535  }
1536 
1537 
1539  //if (simMode_ == GPU_MODE)
1540  // copyWeightState(&managerRuntimeData, &runtimeData[0], cudaMemcpyDeviceToHost, false);
1542 
1544  //if (saveSynapseInfo) {
1545  // for (int i = 0; i < numN; i++) {
1546  // unsigned int offset = managerRuntimeData.cumulativePost[i];
1547 
1548  // unsigned int count = 0;
1549  // for (int t=0;t<maxDelay_;t++) {
1550  // DelayInfo dPar = managerRuntimeData.postDelayInfo[i*(maxDelay_+1)+t];
1551 
1552  // for(int idx_d=dPar.delay_index_start; idx_d<(dPar.delay_index_start+dPar.delay_length); idx_d++)
1553  // count++;
1554  // }
1555 
1556  // if (!fwrite(&count,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1557 
1558  // for (int t=0;t<maxDelay_;t++) {
1559  // DelayInfo dPar = managerRuntimeData.postDelayInfo[i*(maxDelay_+1)+t];
1560 
1561  // for(int idx_d=dPar.delay_index_start; idx_d<(dPar.delay_index_start+dPar.delay_length); idx_d++) {
1562  // // get synaptic info...
1563  // SynInfo post_info = managerRuntimeData.postSynapticIds[offset + idx_d];
1564 
1565  // // get neuron id
1566  // //int p_i = (post_info&POST_SYN_NEURON_MASK);
1567  // unsigned int p_i = GET_CONN_NEURON_ID(post_info);
1568  // assert(p_i<numN);
1569 
1570  // // get syn id
1571  // unsigned int s_i = GET_CONN_SYN_ID(post_info);
1572  // //>>POST_SYN_NEURON_BITS)&POST_SYN_CONN_MASK;
1573  // assert(s_i<(managerRuntimeData.Npre[p_i]));
1574 
1575  // // get the cumulative position for quick access...
1576  // unsigned int pos_i = managerRuntimeData.cumulativePre[p_i] + s_i;
1577 
1578  // uint8_t delay = t+1;
1579  // uint8_t plastic = s_i < managerRuntimeData.Npre_plastic[p_i]; // plastic or fixed.
1580 
1581  // if (!fwrite(&i,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1582  // if (!fwrite(&p_i,sizeof(int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1583  // if (!fwrite(&(managerRuntimeData.wt[pos_i]),sizeof(float),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1584  // if (!fwrite(&(managerRuntimeData.maxSynWt[pos_i]),sizeof(float),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1585  // if (!fwrite(&delay,sizeof(uint8_t),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1586  // if (!fwrite(&plastic,sizeof(uint8_t),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1587  // if (!fwrite(&(managerRuntimeData.connIdsPreIdx[pos_i]),sizeof(short int),1,fid)) KERNEL_ERROR("saveSimulation fwrite error");
1588  // }
1589  // }
1590  // }
1591  //}
1592 }
1593 
1594 // writes population weights from gIDpre to gIDpost to file fname in binary
1595 //void SNN::writePopWeights(std::string fname, int grpIdPre, int grpIdPost) {
1596 // assert(grpIdPre>=0); assert(grpIdPost>=0);
1597 //
1598 // float* weights;
1599 // int matrixSize;
1600 // FILE* fid;
1601 // int numPre, numPost;
1602 // fid = fopen(fname.c_str(), "wb");
1603 // assert(fid != NULL);
1604 //
1605 // if(snnState == CONFIG_SNN || snnState == COMPILED_SNN || snnState == PARTITIONED_SNN){
1606 // KERNEL_ERROR("Simulation has not been run yet, cannot output weights.");
1607 // exitSimulation(1);
1608 // }
1609 //
1610 // SynInfo* preId;
1611 // int pre_nid, pos_ij;
1612 //
1613 // //population sizes
1614 // numPre = groupConfigs[0][grpIdPre].SizeN;
1615 // numPost = groupConfigs[0][grpIdPost].SizeN;
1616 //
1617 // //first iteration gets the number of synaptic weights to place in our
1618 // //weight matrix.
1619 // matrixSize=0;
1620 // //iterate over all neurons in the post group
1621 // for (int i=groupConfigs[0][grpIdPost].StartN; i<=groupConfigs[0][grpIdPost].EndN; i++) {
1622 // // for every post-neuron, find all pre
1623 // pos_ij = managerRuntimeData.cumulativePre[i]; // i-th neuron, j=0th synapse
1624 // //iterate over all presynaptic synapses
1625 // for(int j=0; j<managerRuntimeData.Npre[i]; pos_ij++,j++) {
1626 // preId = &managerRuntimeData.preSynapticIds[pos_ij];
1627 // pre_nid = GET_CONN_NEURON_ID((*preId)); // neuron id of pre
1628 // if (pre_nid<groupConfigs[0][grpIdPre].StartN || pre_nid>groupConfigs[0][grpIdPre].EndN)
1629 // continue; // connection does not belong to group grpIdPre
1630 // matrixSize++;
1631 // }
1632 // }
1633 //
1634 // //now we have the correct size
1635 // weights = new float[matrixSize];
1636 // //second iteration assigns the weights
1637 // int curr = 0; // iterator for return array
1638 // //iterate over all neurons in the post group
1639 // for (int i=groupConfigs[0][grpIdPost].StartN; i<=groupConfigs[0][grpIdPost].EndN; i++) {
1640 // // for every post-neuron, find all pre
1641 // pos_ij = managerRuntimeData.cumulativePre[i]; // i-th neuron, j=0th synapse
1642 // //do the GPU copy here. Copy the current weights from GPU to CPU.
1643 // if(simMode_==GPU_MODE){
1644 // copyWeightsGPU(i,grpIdPre);
1645 // }
1646 // //iterate over all presynaptic synapses
1647 // for(int j=0; j<managerRuntimeData.Npre[i]; pos_ij++,j++) {
1648 // preId = &(managerRuntimeData.preSynapticIds[pos_ij]);
1649 // pre_nid = GET_CONN_NEURON_ID((*preId)); // neuron id of pre
1650 // if (pre_nid<groupConfigs[0][grpIdPre].StartN || pre_nid>groupConfigs[0][grpIdPre].EndN)
1651 // continue; // connection does not belong to group grpIdPre
1652 // weights[curr] = managerRuntimeData.wt[pos_ij];
1653 // curr++;
1654 // }
1655 // }
1656 //
1657 // fwrite(weights,sizeof(float),matrixSize,fid);
1658 // fclose(fid);
1659 // //Let my memory FREE!!!
1660 // delete [] weights;
1661 //}
1662 
1663 
1667 
1668 // set new file pointer for all files
1669 // fp==NULL is code for don't change it
1670 // can be called in all logger modes; however, the analogous interface function can only be called in CUSTOM
1671 void SNN::setLogsFp(FILE* fpInf, FILE* fpErr, FILE* fpDeb, FILE* fpLog) {
1672  if (fpInf!=NULL) {
1673  if (fpInf_!=NULL && fpInf_!=stdout && fpInf_!=stderr)
1674  fclose(fpInf_);
1675  fpInf_ = fpInf;
1676  }
1677 
1678  if (fpErr!=NULL) {
1679  if (fpErr_ != NULL && fpErr_!=stdout && fpErr_!=stderr)
1680  fclose(fpErr_);
1681  fpErr_ = fpErr;
1682  }
1683 
1684  if (fpDeb!=NULL) {
1685  if (fpDeb_!=NULL && fpDeb_!=stdout && fpDeb_!=stderr)
1686  fclose(fpDeb_);
1687  fpDeb_ = fpDeb;
1688  }
1689 
1690  if (fpLog!=NULL) {
1691  if (fpLog_!=NULL && fpLog_!=stdout && fpLog_!=stderr)
1692  fclose(fpLog_);
1693  fpLog_ = fpLog;
1694  }
1695 }
1696 
1697 
1701 
1702 // loop over linked list entries to find a connection with the right pre-post pair, O(N)
1703 short int SNN::getConnectId(int grpIdPre, int grpIdPost) {
1704  short int connId = -1;
1705 
1706  for (std::map<int, ConnectConfig>::iterator it = connectConfigMap.begin(); it != connectConfigMap.end(); it++) {
1707  if (it->second.grpSrc == grpIdPre && it->second.grpDest == grpIdPost) {
1708  connId = it->second.connId;
1709  break;
1710  }
1711  }
1712 
1713  return connId;
1714 }
1715 
1717  CHECK_CONNECTION_ID(connId, numConnections);
1718 
1719  if (connectConfigMap.find(connId) == connectConfigMap.end()) {
1720  KERNEL_ERROR("Total Connections = %d", numConnections);
1721  KERNEL_ERROR("ConnectId (%d) cannot be recognized", connId);
1722  }
1723 
1724  return connectConfigMap[connId];
1725 }
1726 
1727 std::vector<float> SNN::getConductanceAMPA(int gGrpId) {
1728  assert(isSimulationWithCOBA());
1729 
1730  // copy data to the manager runtime
1731  fetchConductanceAMPA(gGrpId);
1732 
1733  std::vector<float> gAMPAvec;
1734  for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
1735  gAMPAvec.push_back(managerRuntimeData.gAMPA[gNId]);
1736  }
1737  return gAMPAvec;
1738 }
1739 
1740 std::vector<float> SNN::getConductanceNMDA(int gGrpId) {
1741  assert(isSimulationWithCOBA());
1742 
1743  // copy data to the manager runtime
1744  fetchConductanceNMDA(gGrpId);
1745 
1746  std::vector<float> gNMDAvec;
1747  if (isSimulationWithNMDARise()) {
1748  // need to construct conductance from rise and decay parts
1749  for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
1750  gNMDAvec.push_back(managerRuntimeData.gNMDA_d[gNId] - managerRuntimeData.gNMDA_r[gNId]);
1751  }
1752  } else {
1753  for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
1754  gNMDAvec.push_back(managerRuntimeData.gNMDA[gNId]);
1755  }
1756  }
1757  return gNMDAvec;
1758 }
1759 
1760 std::vector<float> SNN::getConductanceGABAa(int gGrpId) {
1761  assert(isSimulationWithCOBA());
1762 
1763  // copy data to the manager runtime
1764  fetchConductanceGABAa(gGrpId);
1765 
1766  std::vector<float> gGABAaVec;
1767  for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
1768  gGABAaVec.push_back(managerRuntimeData.gGABAa[gNId]);
1769  }
1770  return gGABAaVec;
1771 }
1772 
1773 std::vector<float> SNN::getConductanceGABAb(int gGrpId) {
1774  assert(isSimulationWithCOBA());
1775 
1776  // copy data to the manager runtime
1777  fetchConductanceGABAb(gGrpId);
1778 
1779  std::vector<float> gGABAbVec;
1780  if (isSimulationWithGABAbRise()) {
1781  // need to construct conductance from rise and decay parts
1782  for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
1783  gGABAbVec.push_back(managerRuntimeData.gGABAb_d[gNId] - managerRuntimeData.gGABAb_r[gNId]);
1784  }
1785  } else {
1786  for (int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
1787  gGABAbVec.push_back(managerRuntimeData.gGABAb[gNId]);
1788  }
1789  }
1790  return gGABAbVec;
1791 }
1792 
1793 // returns RangeDelay struct of a connection
1794 RangeDelay SNN::getDelayRange(short int connId) {
1795  assert(connId>=0 && connId<numConnections);
1796 
1797  return RangeDelay(connectConfigMap[connId].minDelay, connectConfigMap[connId].maxDelay);
1798 }
1799 
1800 // \TODO: bad API design (return allocated memory to user), consider to move this function to connection monitor
1801 uint8_t* SNN::getDelays(int gGrpIdPre, int gGrpIdPost, int& numPreN, int& numPostN) {
1802  int netIdPost = groupConfigMDMap[gGrpIdPost].netId;
1803  int lGrpIdPost = groupConfigMDMap[gGrpIdPost].lGrpId;
1804  int lGrpIdPre = -1;
1805  uint8_t* delays;
1806 
1807  for (int lGrpId = 0; lGrpId < networkConfigs[netIdPost].numGroupsAssigned; lGrpId++)
1808  if (groupConfigs[netIdPost][lGrpId].gGrpId == gGrpIdPre) {
1809  lGrpIdPre = lGrpId;
1810  break;
1811  }
1812  assert(lGrpIdPre != -1);
1813 
1814  numPreN = groupConfigMap[gGrpIdPre].numN;
1815  numPostN = groupConfigMap[gGrpIdPost].numN;
1816 
1817  delays = new uint8_t[numPreN * numPostN];
1818  memset(delays, 0, numPreN * numPostN);
1819 
1820  fetchPostConnectionInfo(netIdPost);
1821 
1822  for (int lNIdPre = groupConfigs[netIdPost][lGrpIdPre].lStartN; lNIdPre < groupConfigs[netIdPost][lGrpIdPre].lEndN; lNIdPre++) {
1823  unsigned int offset = managerRuntimeData.cumulativePost[lNIdPre];
1824 
1825  for (int t = 0; t < glbNetworkConfig.maxDelay; t++) {
1826  DelayInfo dPar = managerRuntimeData.postDelayInfo[lNIdPre * (glbNetworkConfig.maxDelay + 1) + t];
1827 
1828  for(int idx_d = dPar.delay_index_start; idx_d<(dPar.delay_index_start+dPar.delay_length); idx_d++) {
1829  // get synaptic info...
1830  SynInfo postSynInfo = managerRuntimeData.postSynapticIds[offset + idx_d];
1831 
1832  // get local post neuron id
1833  int lNIdPost = GET_CONN_NEURON_ID(postSynInfo);
1834  assert(lNIdPost < glbNetworkConfig.numN);
1835 
1836  if (lNIdPost >= groupConfigs[netIdPost][lGrpIdPost].lStartN && lNIdPost <= groupConfigs[netIdPost][lGrpIdPost].lEndN) {
1837  delays[(lNIdPre - groupConfigs[netIdPost][lGrpIdPre].lStartN) + numPreN * (lNIdPost - groupConfigs[netIdPost][lGrpIdPost].lStartN)] = t + 1;
1838  }
1839  }
1840  }
1841  }
1842  return delays;
1843 }
1844 
1846  assert(gGrpId >= 0 && gGrpId < numGroups);
1847 
1848  return groupConfigMap[gGrpId].grid;
1849 }
1850 
1851 // find ID of group with name grpName
1852 int SNN::getGroupId(std::string grpName) {
1853  int grpId = -1;
1854  for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
1855  if (groupConfigMap[gGrpId].grpName.compare(grpName) == 0) {
1856  grpId = gGrpId;
1857  break;
1858  }
1859  }
1860 
1861  return grpId;
1862 }
1863 
1864 std::string SNN::getGroupName(int gGrpId) {
1865  assert(gGrpId >= -1 && gGrpId < numGroups);
1866 
1867  if (gGrpId == ALL)
1868  return "ALL";
1869 
1870  return groupConfigMap[gGrpId].grpName;
1871 }
1872 
1874  GroupSTDPInfo gInfo;
1875 
1876  gInfo.WithSTDP = groupConfigMap[gGrpId].stdpConfig.WithSTDP;
1877  gInfo.WithESTDP = groupConfigMap[gGrpId].stdpConfig.WithESTDP;
1878  gInfo.WithISTDP = groupConfigMap[gGrpId].stdpConfig.WithISTDP;
1879  gInfo.WithESTDPtype = groupConfigMap[gGrpId].stdpConfig.WithESTDPtype;
1880  gInfo.WithISTDPtype = groupConfigMap[gGrpId].stdpConfig.WithISTDPtype;
1881  gInfo.WithESTDPcurve = groupConfigMap[gGrpId].stdpConfig.WithESTDPcurve;
1882  gInfo.WithISTDPcurve = groupConfigMap[gGrpId].stdpConfig.WithISTDPcurve;
1883  gInfo.ALPHA_MINUS_EXC = groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_EXC;
1884  gInfo.ALPHA_PLUS_EXC = groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_EXC;
1885  gInfo.TAU_MINUS_INV_EXC = groupConfigMap[gGrpId].stdpConfig.TAU_MINUS_INV_EXC;
1886  gInfo.TAU_PLUS_INV_EXC = groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_EXC;
1887  gInfo.ALPHA_MINUS_INB = groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_INB;
1888  gInfo.ALPHA_PLUS_INB = groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_INB;
1889  gInfo.TAU_MINUS_INV_INB = groupConfigMap[gGrpId].stdpConfig.TAU_MINUS_INV_INB;
1890  gInfo.TAU_PLUS_INV_INB = groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_INB;
1891  gInfo.GAMMA = groupConfigMap[gGrpId].stdpConfig.GAMMA;
1892  gInfo.BETA_LTP = groupConfigMap[gGrpId].stdpConfig.BETA_LTP;
1893  gInfo.BETA_LTD = groupConfigMap[gGrpId].stdpConfig.BETA_LTD;
1894  gInfo.LAMBDA = groupConfigMap[gGrpId].stdpConfig.LAMBDA;
1895  gInfo.DELTA = groupConfigMap[gGrpId].stdpConfig.DELTA;
1896 
1897  return gInfo;
1898 }
1899 
1902 
1903  gInfo.baseDP = groupConfigMap[gGrpId].neuromodulatorConfig.baseDP;
1904  gInfo.base5HT = groupConfigMap[gGrpId].neuromodulatorConfig.base5HT;
1905  gInfo.baseACh = groupConfigMap[gGrpId].neuromodulatorConfig.baseACh;
1906  gInfo.baseNE = groupConfigMap[gGrpId].neuromodulatorConfig.baseNE;
1907  gInfo.decayDP = groupConfigMap[gGrpId].neuromodulatorConfig.decayDP;
1908  gInfo.decay5HT = groupConfigMap[gGrpId].neuromodulatorConfig.decay5HT;
1909  gInfo.decayACh = groupConfigMap[gGrpId].neuromodulatorConfig.decayACh;
1910  gInfo.decayNE = groupConfigMap[gGrpId].neuromodulatorConfig.decayNE;
1911 
1912  return gInfo;
1913 }
1914 
1916  int gGrpId = -1;
1917  assert(gNId >= 0 && gNId < glbNetworkConfig.numN);
1918 
1919  // search for global group id
1920  for (std::map<int, GroupConfigMD>::iterator grpIt = groupConfigMDMap.begin(); grpIt != groupConfigMDMap.end(); grpIt++) {
1921  if (gNId >= grpIt->second.gStartN && gNId <= grpIt->second.gEndN)
1922  gGrpId = grpIt->second.gGrpId;
1923  }
1924 
1925  // adjust neurId for neuron ID of first neuron in the group
1926  int neurId = gNId - groupConfigMDMap[gGrpId].gStartN;
1927 
1928  return getNeuronLocation3D(gGrpId, neurId);
1929 }
1930 
1931 Point3D SNN::getNeuronLocation3D(int gGrpId, int relNeurId) {
1932  Grid3D grid = groupConfigMap[gGrpId].grid;
1933  assert(gGrpId >= 0 && gGrpId < numGroups);
1934  assert(relNeurId >= 0 && relNeurId < getGroupNumNeurons(gGrpId));
1935 
1936  int intX = relNeurId % grid.numX;
1937  int intY = (relNeurId / grid.numX) % grid.numY;
1938  int intZ = relNeurId / (grid.numX * grid.numY);
1939 
1940  // get coordinates center around origin
1941  double coordX = grid.distX * intX + grid.offsetX;
1942  double coordY = grid.distY * intY + grid.offsetY;
1943  double coordZ = grid.distZ * intZ + grid.offsetZ;
1944  return Point3D(coordX, coordY, coordZ);
1945 }
1946 
1947 // returns the number of synaptic connections associated with this connection.
1948 int SNN::getNumSynapticConnections(short int connId) {
1949  //we didn't find the connection.
1950  if (connectConfigMap.find(connId) == connectConfigMap.end()) {
1951  KERNEL_ERROR("Connection ID was not found. Quitting.");
1952  exitSimulation(1);
1953  }
1954 
1955  return connectConfigMap[connId].numberOfConnections;
1956 }
1957 
1958 // returns pointer to existing SpikeMonitor object, NULL else
1960  assert(gGrpId >= 0 && gGrpId < getNumGroups());
1961 
1962  if (groupConfigMDMap[gGrpId].spikeMonitorId >= 0) {
1963  return spikeMonList[(groupConfigMDMap[gGrpId].spikeMonitorId)];
1964  } else {
1965  return NULL;
1966  }
1967 }
1968 
1970  assert(gGrpId >= 0 && gGrpId < getNumGroups());
1971 
1972  if (groupConfigMDMap[gGrpId].spikeMonitorId >= 0) {
1973  return spikeMonCoreList[(groupConfigMDMap[gGrpId].spikeMonitorId)];
1974  } else {
1975  return NULL;
1976  }
1977 }
1978 
1979 // returns pointer to existing NeuronMonitor object, NULL else
1981  assert(gGrpId >= 0 && gGrpId < getNumGroups());
1982 
1983  if (groupConfigMDMap[gGrpId].neuronMonitorId >= 0) {
1984  return neuronMonList[(groupConfigMDMap[gGrpId].neuronMonitorId)];
1985  }
1986  else {
1987  return NULL;
1988  }
1989 }
1990 
1992  assert(gGrpId >= 0 && gGrpId < getNumGroups());
1993 
1994  if (groupConfigMDMap[gGrpId].neuronMonitorId >= 0) {
1995  return neuronMonCoreList[(groupConfigMDMap[gGrpId].neuronMonitorId)];
1996  }
1997  else {
1998  return NULL;
1999  }
2000 }
2001 
2003  assert(connId>=0 && connId<numConnections);
2004 
2005  return RangeWeight(0.0f, connectConfigMap[connId].initWt, connectConfigMap[connId].maxWt);
2006 }
2007 
2008 
2012 
2013 // all unsafe operations of SNN constructor
// all unsafe operations of SNN constructor
void SNN::SNNinit() {
	// One-time construction work: open logger streams, create the results
	// directory and log file, seed RNG, and reset every member to its default.
	// NOTE(review): the extracted view is missing a couple of interior lines
	// (e.g. the version args for the welcome banner); do not reflow this body.

	// initialize snnState
	snnState = CONFIG_SNN;

	// set logger mode (defines where to print all status, error, and debug messages)
	// USER:      info+error to console, debug discarded
	// DEVELOPER: info, error, and debug all to console
	// SHOWTIME:  only errors to console
	// SILENT/CUSTOM: everything discarded (CUSTOM presumably replaced later via setLogsFp)
	switch (loggerMode_) {
	case USER:
		fpInf_ = stdout;
		fpErr_ = stderr;
		#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
			fpDeb_ = fopen("nul","w");
		#else
			fpDeb_ = fopen("/dev/null","w");
		#endif
		break;
	case DEVELOPER:
		fpInf_ = stdout;
		fpErr_ = stderr;
		fpDeb_ = stdout;
		break;
	case SHOWTIME:
		#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
			fpInf_ = fopen("nul","w");
		#else
			fpInf_ = fopen("/dev/null","w");
		#endif
		fpErr_ = stderr;
		#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
			fpDeb_ = fopen("nul","w");
		#else
			fpDeb_ = fopen("/dev/null","w");
		#endif
		break;
	case SILENT:
	case CUSTOM:
		#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
			fpInf_ = fopen("nul","w");
			fpErr_ = fopen("nul","w");
			fpDeb_ = fopen("nul","w");
		#else
			fpInf_ = fopen("/dev/null","w");
			fpErr_ = fopen("/dev/null","w");
			fpDeb_ = fopen("/dev/null","w");
		#endif
		break;
	default:
		fpErr_ = stderr; // need to open file stream first
		KERNEL_ERROR("Unknown logger mode");
		exit(UNKNOWN_LOGGER_ERROR);

	}

	// try to open log file in results folder: create if not exists
#if defined(WIN32) || defined(WIN64)
	CreateDirectory("results", NULL);
	fpLog_ = fopen("results/carlsim.log", "w");
#else
	struct stat sb;
	int createDir = 1;
	if (stat("results", &sb) == -1 || !S_ISDIR(sb.st_mode)) {
		// results dir does not exist, try to create:
		createDir = mkdir("results", 0777);
	}

	if (createDir == -1) {
		// tried to create dir, but failed
		fprintf(stderr, "Could not create directory \"results/\", which is required to "
			"store simulation results. Aborting simulation...\n");
		exit(NO_LOGGER_DIR_ERROR);
	} else {
		// open log file
		fpLog_ = fopen("results/carlsim.log", "w");

		if (createDir == 0) {
			// newly created dir: now that fpLog_/fpInf_ exist, inform user
			KERNEL_INFO("Created results directory \"results/\".");
		}
	}
#endif
	// fopen failure is fatal in either branch above
	if (fpLog_ == NULL) {
		fprintf(stderr, "Could not create the directory \"results/\" or the log file \"results/carlsim.log\""
			", which is required to store simulation results. Aborting simulation...\n");
		exit(NO_LOGGER_DIR_ERROR);
	}

	KERNEL_INFO("*********************************************************************************");
	KERNEL_INFO("******************** Welcome to CARLsim %d.%d ***************************",
	KERNEL_INFO("*********************************************************************************\n");

	KERNEL_INFO("***************************** Configuring Network ********************************");
	KERNEL_INFO("Starting CARLsim simulation \"%s\" in %s mode",networkName_.c_str(),
		loggerMode_string[loggerMode_]);
	KERNEL_INFO("Random number seed: %d",randSeed_);

	time_t rawtime;
	struct tm * timeinfo;
	time(&rawtime);
	timeinfo = localtime(&rawtime);
	KERNEL_DEBUG("Current local time and date: %s", asctime(timeinfo));

	// init random seed
	srand48(randSeed_);

	// reset simulation-time bookkeeping
	simTimeRunStart = 0; simTimeRunStop = 0;
	simTimeLastRunSummary = 0;
	simTimeMs = 0; simTimeSec = 0; simTime = 0;

	// nothing configured yet
	numGroups = 0;
	numConnections = 0;
	numCompartmentConnections = 0;
	numSpikeGenGrps = 0;
	simulatorDeleted = false;

	cumExecutionTime = 0.0;
	executionTime = 0.0;

	// monitor bookkeeping
	spikeRateUpdated = false;
	numSpikeMonitor = 0;
	numNeuronMonitor = 0;
	numGroupMonitor = 0;
	numConnectionMonitor = 0;

	// simulation feature flags; flipped later as the user configures the network
	sim_with_compartments = false;
	sim_with_fixedwts = true; // default is true, will be set to false if there are any plastic synapses
	sim_with_conductances = false; // default is false
	sim_with_stdp = false;
	sim_with_modulated_stdp = false;
	sim_with_homeostasis = false;
	sim_with_stp = false;
	sim_in_testing = false;

	loadSimFID = NULL;

	// conductance info struct for simulation
	sim_with_NMDA_rise = false;
	sim_with_GABAb_rise = false;
	dAMPA = 1.0-1.0/5.0; // some default decay and rise times
	rNMDA = 1.0-1.0/10.0;
	dNMDA = 1.0-1.0/150.0;
	sNMDA = 1.0;
	dGABAa = 1.0-1.0/6.0;
	rGABAb = 1.0-1.0/100.0;
	dGABAb = 1.0-1.0/150.0;
	sGABAb = 1.0;

	// default integration method: Forward-Euler with 0.5ms integration step

	mulSynFast = NULL;
	mulSynSlow = NULL;

	// reset all monitors, don't deallocate (false)
	resetMonitors(false);

	resetGroupConfigs(false);

	resetConnectionConfigs(false);

	// initialize spike buffer
	spikeBuf = new SpikeBuffer(0, MAX_TIME_SLICE);

	// zero all per-network runtime configs
	memset(networkConfigs, 0, sizeof(NetworkConfigRT) * MAX_NET_PER_SNN);

	// reset all runtime data
	// GPU/CPU runtime data
	memset(runtimeData, 0, sizeof(RuntimeData) * MAX_NET_PER_SNN);
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) // FIXME: redundant??
		runtimeData[netId].allocated = false;

	// Manager runtime data
	memset(&managerRuntimeData, 0, sizeof(RuntimeData));
	managerRuntimeData.allocated = false; // FIXME: redundant??

	// default weight update parameter
	wtANDwtChangeUpdateInterval_ = 1000; // update weights every 1000 ms (default)
	wtANDwtChangeUpdateIntervalCnt_ = 0; // helper var to implement fast modulo
	stdpScaleFactor_ = 1.0f;
	wtChangeDecay_ = 0.0f;

	// FIXME: use it when necessary
#ifndef __NO_CUDA__
	CUDA_CREATE_TIMER(timer);
	CUDA_RESET_TIMER(timer);
#endif
}
2200 
void SNN::advSimStep() {
	// Advance the network by one simulation time step. The call order below is
	// significant: each stage consumes state produced by the previous one.

	// update short-term plasticity variables and decay conductances
	doSTPUpdateAndDecayCond();

	//KERNEL_INFO("STPUpdate!");

	// push new Poisson rates / user-defined spikes into the spike buffer
	spikeGeneratorUpdate();

	//KERNEL_INFO("spikeGeneratorUpdate!");

	// determine which neurons fire in this time step
	findFiring();

	//KERNEL_INFO("Find firing!");

	updateTimingTable();

	// route fired spikes to their destination partitions
	routeSpikes();

	// accumulate synaptic currents from arriving spikes
	doCurrentUpdate();

	//KERNEL_INFO("doCurrentUpdate!");

	// integrate neuron state variables
	globalStateUpdate();

	//KERNEL_INFO("globalStateUpdate!");

	// clear the external firing table for the next step
	clearExtFiringTable();
}
2228 
void SNN::doSTPUpdateAndDecayCond() {
	// Dispatch the STP-update-and-conductance-decay step to every active network
	// partition. GPU partitions (netId < CPU_RUNTIME_BASE) are dispatched
	// directly; CPU partitions run in parallel pthreads on Linux/Mac and
	// serially on Windows/Apple.
	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	pthread_t threads[numCores + 1]; // 1 additional array size if numCores == 0, it may work though bad practice
	cpu_set_t cpus;
	ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
	int threadCount = 0;
	#endif

	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			assert(runtimeData[netId].allocated);
			if (netId < CPU_RUNTIME_BASE) // GPU runtime
				doSTPUpdateAndDecayCond_GPU(netId);
			else{//CPU runtime
				#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
				doSTPUpdateAndDecayCond_CPU(netId);
				#else // Linux or MAC
				// pin each worker thread to a core, round-robin over NUM_CPU_CORES
				pthread_attr_t attr;
				pthread_attr_init(&attr);
				CPU_ZERO(&cpus);
				CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
				pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

				// only snn_pointer and netId are meaningful here; the remaining
				// fields are zeroed defaults
				argsThreadRoutine[threadCount].snn_pointer = this;
				argsThreadRoutine[threadCount].netId = netId;
				argsThreadRoutine[threadCount].lGrpId = 0;
				argsThreadRoutine[threadCount].startIdx = 0;
				argsThreadRoutine[threadCount].endIdx = 0;
				argsThreadRoutine[threadCount].GtoLOffset = 0;

				pthread_create(&threads[threadCount], &attr, &SNN::helperDoSTPUpdateAndDecayCond_CPU, (void*)&argsThreadRoutine[threadCount]);
				pthread_attr_destroy(&attr);
				threadCount++;
				#endif
			}
		}
	}

	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	// join all the threads
	for (int i=0; i<threadCount; i++){
		pthread_join(threads[i], NULL);
	}
	#endif
}
2274 
void SNN::spikeGeneratorUpdate() {
	// Feed this time step's generated spikes into the network:
	// (1) if Poisson rates changed, push the new rates to every partition,
	// (2) let user-defined spike generators schedule spikes,
	// (3) run the per-partition spike generator update,
	// (4) advance the spike buffer by one time step.
	// CPU partitions run in pthreads on Linux/Mac, serially on Windows/Apple.

	// If poisson rate has been updated, assign new poisson rate
	if (spikeRateUpdated) {
		#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
		pthread_t threads[numCores + 1]; // 1 additional array size if numCores == 0, it may work though bad practice
		cpu_set_t cpus;
		ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
		int threadCount = 0;
		#endif

		for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
			if (!groupPartitionLists[netId].empty()) {
				if (netId < CPU_RUNTIME_BASE) // GPU runtime
					assignPoissonFiringRate_GPU(netId);
				else{ // CPU runtime
					#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
					assignPoissonFiringRate_CPU(netId);
					#else // Linux or MAC
					// pin the worker thread to a core, round-robin over NUM_CPU_CORES
					pthread_attr_t attr;
					pthread_attr_init(&attr);
					CPU_ZERO(&cpus);
					CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
					pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

					// only snn_pointer and netId are meaningful here
					argsThreadRoutine[threadCount].snn_pointer = this;
					argsThreadRoutine[threadCount].netId = netId;
					argsThreadRoutine[threadCount].lGrpId = 0;
					argsThreadRoutine[threadCount].startIdx = 0;
					argsThreadRoutine[threadCount].endIdx = 0;
					argsThreadRoutine[threadCount].GtoLOffset = 0;

					pthread_create(&threads[threadCount], &attr, &SNN::helperAssignPoissonFiringRate_CPU, (void*)&argsThreadRoutine[threadCount]);
					pthread_attr_destroy(&attr);
					threadCount++;
					#endif
				}
			}
		}

		#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
		// join all the threads
		for (int i=0; i<threadCount; i++){
			pthread_join(threads[i], NULL);
		}
		#endif

		spikeRateUpdated = false;
	}

	// If time slice has expired, check if new spikes needs to be generated by user-defined spike generators
	generateUserDefinedSpikes();

	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	pthread_t threads[numCores + 1]; // 1 additional array size if numCores == 0, it may work though bad practice
	cpu_set_t cpus;
	ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
	int threadCount = 0;
	#endif

	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			if (netId < CPU_RUNTIME_BASE) // GPU runtime
				spikeGeneratorUpdate_GPU(netId);
			else{ // CPU runtime
				#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
				spikeGeneratorUpdate_CPU(netId);
				#else // Linux or MAC
				// same pinned-thread dispatch pattern as above
				pthread_attr_t attr;
				pthread_attr_init(&attr);
				CPU_ZERO(&cpus);
				CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
				pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

				argsThreadRoutine[threadCount].snn_pointer = this;
				argsThreadRoutine[threadCount].netId = netId;
				argsThreadRoutine[threadCount].lGrpId = 0;
				argsThreadRoutine[threadCount].startIdx = 0;
				argsThreadRoutine[threadCount].endIdx = 0;
				argsThreadRoutine[threadCount].GtoLOffset = 0;

				pthread_create(&threads[threadCount], &attr, &SNN::helperSpikeGeneratorUpdate_CPU, (void*)&argsThreadRoutine[threadCount]);
				pthread_attr_destroy(&attr);
				threadCount++;
				#endif
			}
		}
	}

	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	// join all the threads
	for (int i=0; i<threadCount; i++){
		pthread_join(threads[i], NULL);
	}
	#endif

	// tell the spike buffer to advance to the next time step
	spikeBuf->step();
}
2373 
void SNN::findFiring() {
	// Dispatch the firing-detection step to every active partition: GPU
	// partitions directly, CPU partitions via core-pinned pthreads on Linux/Mac
	// (serially on Windows/Apple). All threads are joined before returning.
	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	pthread_t threads[numCores + 1]; // 1 additional array size if numCores == 0, it may work though bad practice
	cpu_set_t cpus;
	ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
	int threadCount = 0;
	#endif

	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			if (netId < CPU_RUNTIME_BASE) // GPU runtime
				findFiring_GPU(netId);
			else {// CPU runtime
				#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
				findFiring_CPU(netId);
				#else // Linux or MAC
				// pin the worker thread to a core, round-robin over NUM_CPU_CORES
				pthread_attr_t attr;
				pthread_attr_init(&attr);
				CPU_ZERO(&cpus);
				CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
				pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

				// only snn_pointer and netId are meaningful here
				argsThreadRoutine[threadCount].snn_pointer = this;
				argsThreadRoutine[threadCount].netId = netId;
				argsThreadRoutine[threadCount].lGrpId = 0;
				argsThreadRoutine[threadCount].startIdx = 0;
				argsThreadRoutine[threadCount].endIdx = 0;
				argsThreadRoutine[threadCount].GtoLOffset = 0;

				pthread_create(&threads[threadCount], &attr, &SNN::helperFindFiring_CPU, (void*)&argsThreadRoutine[threadCount]);
				pthread_attr_destroy(&attr);
				threadCount++;
				#endif
			}
		}
	}

	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	// join all the threads
	for (int i=0; i<threadCount; i++){
		pthread_join(threads[i], NULL);
	}
	#endif
}
2418 
// Delivers synaptic currents in two sequential phases: first connections with
// longer delays (D2), then delay-1 connections (D1). The D2 worker threads are
// joined — and threadCount reset — before the D1 phase starts, so the two
// phases never overlap. Per-partition dispatch follows the usual scheme:
// GPU partitions call the *_GPU kernels, CPU partitions run inline
// (Windows/macOS) or on core-pinned pthreads (Linux).
void SNN::doCurrentUpdate() {
	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	// Worker bookkeeping for CPU partitions; +1 avoids a zero-length array
	// when numCores == 0.
	pthread_t threads[numCores + 1]; // 1 additional array size if numCores == 0, it may work though bad practice
	cpu_set_t cpus;
	ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
	int threadCount = 0;
	#endif

	// --- Phase 1: current update for delay >= 2 connections (D2) ---
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			if (netId < CPU_RUNTIME_BASE) // GPU runtime
				doCurrentUpdateD2_GPU(netId);
			else{ // CPU runtime
				#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
				doCurrentUpdateD2_CPU(netId);
				#else // Linux or MAC
				// Pin the worker thread to a core (round-robin) before launching it.
				pthread_attr_t attr;
				pthread_attr_init(&attr);
				CPU_ZERO(&cpus);
				CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
				pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

				// Pass this SNN and the partition id; remaining fields zeroed —
				// presumably unused by this helper.
				argsThreadRoutine[threadCount].snn_pointer = this;
				argsThreadRoutine[threadCount].netId = netId;
				argsThreadRoutine[threadCount].lGrpId = 0;
				argsThreadRoutine[threadCount].startIdx = 0;
				argsThreadRoutine[threadCount].endIdx = 0;
				argsThreadRoutine[threadCount].GtoLOffset = 0;

				pthread_create(&threads[threadCount], &attr, &SNN::helperDoCurrentUpdateD2_CPU, (void*)&argsThreadRoutine[threadCount]);
				pthread_attr_destroy(&attr);
				threadCount++;
				#endif
			}
		}
	}

	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	// join all the threads
	for (int i=0; i<threadCount; i++){
		pthread_join(threads[i], NULL);
	}
	// reuse the thread/argument arrays for the D1 phase below
	threadCount = 0;
	#endif

	// --- Phase 2: current update for delay-1 connections (D1) ---
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			if (netId < CPU_RUNTIME_BASE) // GPU runtime
				doCurrentUpdateD1_GPU(netId);
			else{ // CPU runtime
				#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
				doCurrentUpdateD1_CPU(netId);
				#else // Linux or MAC
				pthread_attr_t attr;
				pthread_attr_init(&attr);
				CPU_ZERO(&cpus);
				CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
				pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

				argsThreadRoutine[threadCount].snn_pointer = this;
				argsThreadRoutine[threadCount].netId = netId;
				argsThreadRoutine[threadCount].lGrpId = 0;
				argsThreadRoutine[threadCount].startIdx = 0;
				argsThreadRoutine[threadCount].endIdx = 0;
				argsThreadRoutine[threadCount].GtoLOffset = 0;

				pthread_create(&threads[threadCount], &attr, &SNN::helperDoCurrentUpdateD1_CPU, (void*)&argsThreadRoutine[threadCount]);
				pthread_attr_destroy(&attr);
				threadCount++;
				#endif
			}
		}
	}

	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	// join all the threads
	for (int i=0; i<threadCount; i++){
		pthread_join(threads[i], NULL);
	}
	#endif
}
2500 
// Dispatches updateTimingTable_* to every allocated network partition:
// *_GPU for GPU partitions, and for CPU partitions either an inline call
// (Windows/macOS) or one core-pinned pthread per partition (Linux), joined
// before returning.
void SNN::updateTimingTable() {
	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	// Worker bookkeeping for CPU partitions; +1 avoids a zero-length array
	// when numCores == 0.
	pthread_t threads[numCores + 1]; // 1 additional array size if numCores == 0, it may work though bad practice
	cpu_set_t cpus;
	ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
	int threadCount = 0;
	#endif

	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			if (netId < CPU_RUNTIME_BASE) // GPU runtime
				updateTimingTable_GPU(netId);
			else{ // CPU runtime
				#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
				updateTimingTable_CPU(netId);
				#else // Linux or MAC
				// Pin the worker thread to a core (round-robin) before launching it.
				pthread_attr_t attr;
				pthread_attr_init(&attr);
				CPU_ZERO(&cpus);
				CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
				pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

				// Pass this SNN and the partition id; remaining fields zeroed —
				// presumably unused by this helper.
				argsThreadRoutine[threadCount].snn_pointer = this;
				argsThreadRoutine[threadCount].netId = netId;
				argsThreadRoutine[threadCount].lGrpId = 0;
				argsThreadRoutine[threadCount].startIdx = 0;
				argsThreadRoutine[threadCount].endIdx = 0;
				argsThreadRoutine[threadCount].GtoLOffset = 0;

				pthread_create(&threads[threadCount], &attr, &SNN::helperUpdateTimingTable_CPU, (void*)&argsThreadRoutine[threadCount]);
				pthread_attr_destroy(&attr);
				threadCount++;
				#endif
			}
		}
	}
	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	// join all the threads
	for (int i=0; i<threadCount; i++){
		pthread_join(threads[i], NULL);
	}
	#endif
}
2544 
// Advances the global neuron/network state for all allocated partitions.
// GPU partitions run three sequential kernels: _C (compartmental/current),
// then _N, then _G; the phase loops below are GPU-only, so CPU partitions are
// handled entirely by globalStateUpdate_CPU (inline on Windows/macOS, via
// core-pinned pthreads on Linux). CPU threads are joined before the GPU-only
// phases run.
void SNN::globalStateUpdate() {
	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	// Worker bookkeeping for CPU partitions; +1 avoids a zero-length array
	// when numCores == 0.
	pthread_t threads[numCores + 1]; // 1 additional array size if numCores == 0, it may work though bad practice
	cpu_set_t cpus;
	ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
	int threadCount = 0;
	#endif

	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			if (netId < CPU_RUNTIME_BASE) // GPU runtime
				globalStateUpdate_C_GPU(netId);
			else{ // CPU runtime
				#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
				globalStateUpdate_CPU(netId);
				#else // Linux or MAC
				// Pin the worker thread to a core (round-robin) before launching it.
				pthread_attr_t attr;
				pthread_attr_init(&attr);
				CPU_ZERO(&cpus);
				CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
				pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

				// Pass this SNN and the partition id; remaining fields zeroed —
				// presumably unused by this helper.
				argsThreadRoutine[threadCount].snn_pointer = this;
				argsThreadRoutine[threadCount].netId = netId;
				argsThreadRoutine[threadCount].lGrpId = 0;
				argsThreadRoutine[threadCount].startIdx = 0;
				argsThreadRoutine[threadCount].endIdx = 0;
				argsThreadRoutine[threadCount].GtoLOffset = 0;

				pthread_create(&threads[threadCount], &attr, &SNN::helperGlobalStateUpdate_CPU, (void*)&argsThreadRoutine[threadCount]);
				pthread_attr_destroy(&attr);
				threadCount++;
				#endif
			}
		}
	}

	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	// join all the threads
	for (int i=0; i<threadCount; i++){
		pthread_join(threads[i], NULL);
	}
	#endif

	// Second GPU phase (_N): runs only after the _C phase (and all CPU
	// partitions) have completed.
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			if (netId < CPU_RUNTIME_BASE) // GPU runtime
				globalStateUpdate_N_GPU(netId);
		}
	}

	// Third GPU phase (_G).
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			if (netId < CPU_RUNTIME_BASE) // GPU runtime
				globalStateUpdate_G_GPU(netId);
		}
	}
}
2603 
// Dispatches clearExtFiringTable_* to every allocated network partition:
// *_GPU for GPU partitions, and for CPU partitions either an inline call
// (Windows/macOS) or one core-pinned pthread per partition (Linux), joined
// before returning.
void SNN::clearExtFiringTable() {
	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	// Worker bookkeeping for CPU partitions; +1 avoids a zero-length array
	// when numCores == 0.
	pthread_t threads[numCores + 1]; // 1 additional array size if numCores == 0, it may work though bad practice
	cpu_set_t cpus;
	ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
	int threadCount = 0;
	#endif

	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			if (netId < CPU_RUNTIME_BASE) // GPU runtime
				clearExtFiringTable_GPU(netId);
			else{ // CPU runtime
				#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
				clearExtFiringTable_CPU(netId);
				#else // Linux or MAC
				// Pin the worker thread to a core (round-robin) before launching it.
				pthread_attr_t attr;
				pthread_attr_init(&attr);
				CPU_ZERO(&cpus);
				CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
				pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

				// Pass this SNN and the partition id; remaining fields zeroed —
				// presumably unused by this helper.
				argsThreadRoutine[threadCount].snn_pointer = this;
				argsThreadRoutine[threadCount].netId = netId;
				argsThreadRoutine[threadCount].lGrpId = 0;
				argsThreadRoutine[threadCount].startIdx = 0;
				argsThreadRoutine[threadCount].endIdx = 0;
				argsThreadRoutine[threadCount].GtoLOffset = 0;

				pthread_create(&threads[threadCount], &attr, &SNN::helperClearExtFiringTable_CPU, (void*)&argsThreadRoutine[threadCount]);
				pthread_attr_destroy(&attr);
				threadCount++;
				#endif
			}
		}
	}

	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	// join all the threads
	for (int i=0; i<threadCount; i++){
		pthread_join(threads[i], NULL);
	}
	#endif
}
2648 
// Dispatches updateWeights_* (synaptic weight update) to every allocated
// network partition: *_GPU for GPU partitions, and for CPU partitions either
// an inline call (Windows/macOS) or one core-pinned pthread per partition
// (Linux), joined before returning.
void SNN::updateWeights() {
	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	// Worker bookkeeping for CPU partitions; +1 avoids a zero-length array
	// when numCores == 0.
	pthread_t threads[numCores + 1]; // 1 additional array size if numCores == 0, it may work though bad practice
	cpu_set_t cpus;
	ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
	int threadCount = 0;
	#endif

	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			if (netId < CPU_RUNTIME_BASE) // GPU runtime
				updateWeights_GPU(netId);
			else{ // CPU runtime
				#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
				updateWeights_CPU(netId);
				#else // Linux or MAC
				// Pin the worker thread to a core (round-robin) before launching it.
				pthread_attr_t attr;
				pthread_attr_init(&attr);
				CPU_ZERO(&cpus);
				CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
				pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

				// Pass this SNN and the partition id; remaining fields zeroed —
				// presumably unused by this helper.
				argsThreadRoutine[threadCount].snn_pointer = this;
				argsThreadRoutine[threadCount].netId = netId;
				argsThreadRoutine[threadCount].lGrpId = 0;
				argsThreadRoutine[threadCount].startIdx = 0;
				argsThreadRoutine[threadCount].endIdx = 0;
				argsThreadRoutine[threadCount].GtoLOffset = 0;

				pthread_create(&threads[threadCount], &attr, &SNN::helperUpdateWeights_CPU, (void*)&argsThreadRoutine[threadCount]);
				pthread_attr_destroy(&attr);
				threadCount++;
				#endif
			}
		}
	}
	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	// join all the threads
	for (int i=0; i<threadCount; i++){
		pthread_join(threads[i], NULL);
	}
	#endif

}
2693 
2694 void SNN::updateNetworkConfig(int netId) {
2695  assert(netId < MAX_NET_PER_SNN);
2696 
2697  if (netId < CPU_RUNTIME_BASE) // GPU runtime
2698  copyNetworkConfig(netId, cudaMemcpyHostToDevice);
2699  else
2700  copyNetworkConfig(netId); // CPU runtime
2701 }
2702 
// Shifts the per-partition spike tables at the end of a simulated second.
// Phase F (firing tables) runs on every partition — *_GPU for GPU partitions,
// inline or via core-pinned pthreads for CPU partitions. Phase T is GPU-only
// here; CPU partitions are handled entirely by shiftSpikeTables_CPU.
void SNN::shiftSpikeTables() {
	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	// Worker bookkeeping for CPU partitions; +1 avoids a zero-length array
	// when numCores == 0.
	pthread_t threads[numCores + 1]; // 1 additional array size if numCores == 0, it may work though bad practice
	cpu_set_t cpus;
	ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
	int threadCount = 0;
	#endif

	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			if (netId < CPU_RUNTIME_BASE) // GPU runtime
				shiftSpikeTables_F_GPU(netId);
			else { // CPU runtime
				#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
				shiftSpikeTables_CPU(netId);
				#else // Linux or MAC
				// Pin the worker thread to a core (round-robin) before launching it.
				pthread_attr_t attr;
				pthread_attr_init(&attr);
				CPU_ZERO(&cpus);
				CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
				pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

				// Pass this SNN and the partition id; remaining fields zeroed —
				// presumably unused by this helper.
				argsThreadRoutine[threadCount].snn_pointer = this;
				argsThreadRoutine[threadCount].netId = netId;
				argsThreadRoutine[threadCount].lGrpId = 0;
				argsThreadRoutine[threadCount].startIdx = 0;
				argsThreadRoutine[threadCount].endIdx = 0;
				argsThreadRoutine[threadCount].GtoLOffset = 0;

				pthread_create(&threads[threadCount], &attr, &SNN::helperShiftSpikeTables_CPU, (void*)&argsThreadRoutine[threadCount]);
				pthread_attr_destroy(&attr);
				threadCount++;
				#endif
			}
		}
	}

	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	// join all the threads
	for (int i=0; i<threadCount; i++){
		pthread_join(threads[i], NULL);
	}
	#endif

	// GPU-only T phase: runs only after the F phase (and all CPU partitions)
	// have completed.
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			if (netId < CPU_RUNTIME_BASE) // GPU runtime
				shiftSpikeTables_T_GPU(netId);
		}
	}
}
2754 
2755 void SNN::allocateSNN(int netId) {
2756  assert(netId > ANY && netId < MAX_NET_PER_SNN);
2757 
2758  if (netId < CPU_RUNTIME_BASE)
2759  allocateSNN_GPU(netId);
2760  else
2761  allocateSNN_CPU(netId);
2762 }
2763 
// Allocates and zero-initializes the manager's host-side (CPU_MEM) runtime
// buffers. Buffer sizes come from managerRTDSize: max* fields are sized for
// the largest single partition, glb* fields for the whole global network.
// All buffers are raw new[] arrays owned by managerRuntimeData; on success,
// managerRuntimeData.allocated is set to true.
void SNN::allocateManagerRuntimeData() {
	// reset variable related to spike count
	managerRuntimeData.spikeCountSec = 0;
	managerRuntimeData.spikeCountD1Sec = 0;
	managerRuntimeData.spikeCountD2Sec = 0;
	managerRuntimeData.spikeCountLastSecLeftD2 = 0;
	managerRuntimeData.spikeCount = 0;
	managerRuntimeData.spikeCountD1 = 0;
	managerRuntimeData.spikeCountD2 = 0;
	managerRuntimeData.nPoissonSpikes = 0;
	managerRuntimeData.spikeCountExtRxD1 = 0;
	managerRuntimeData.spikeCountExtRxD2 = 0;

	// Per-neuron state for regular (non-spike-generator) neurons:
	// Izhikevich (4- and 9-parameter) and LIF model parameters, plus
	// current accumulators and the current-spike flag.
	managerRuntimeData.voltage = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.nextVoltage = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.recovery = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.Izh_a = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.Izh_b = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.Izh_c = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.Izh_d = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.Izh_C = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.Izh_k = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.Izh_vr = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.Izh_vt = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.Izh_vpeak = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.lif_tau_m = new int[managerRTDSize.maxNumNReg];
	managerRuntimeData.lif_tau_ref = new int[managerRTDSize.maxNumNReg];
	managerRuntimeData.lif_tau_ref_c = new int[managerRTDSize.maxNumNReg];
	managerRuntimeData.lif_vTh = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.lif_vReset = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.lif_gain = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.lif_bias = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.current = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.extCurrent = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.totalCurrent = new float[managerRTDSize.maxNumNReg];
	managerRuntimeData.curSpike = new bool[managerRTDSize.maxNumNReg];
	memset(managerRuntimeData.voltage, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.nextVoltage, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.recovery, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.Izh_a, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.Izh_b, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.Izh_c, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.Izh_d, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.Izh_C, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.Izh_k, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.Izh_vr, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.Izh_vt, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.Izh_vpeak, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.lif_tau_m, 0, sizeof(int) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.lif_tau_ref, 0, sizeof(int) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.lif_tau_ref_c, 0, sizeof(int) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.lif_vTh, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.lif_vReset, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.lif_gain, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.lif_bias, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.current, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.extCurrent, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.totalCurrent, 0, sizeof(float) * managerRTDSize.maxNumNReg);
	memset(managerRuntimeData.curSpike, 0, sizeof(bool) * managerRTDSize.maxNumNReg);

	// Neuron-monitor buffers: 1000 samples (1 s at ms resolution) per group.
	managerRuntimeData.nVBuffer = new float[MAX_NEURON_MON_GRP_SZIE * 1000 * managerRTDSize.maxNumGroups]; // 1 second v buffer
	managerRuntimeData.nUBuffer = new float[MAX_NEURON_MON_GRP_SZIE * 1000 * managerRTDSize.maxNumGroups];
	managerRuntimeData.nIBuffer = new float[MAX_NEURON_MON_GRP_SZIE * 1000 * managerRTDSize.maxNumGroups];
	memset(managerRuntimeData.nVBuffer, 0, sizeof(float) * MAX_NEURON_MON_GRP_SZIE * 1000 * managerRTDSize.maxNumGroups);
	memset(managerRuntimeData.nUBuffer, 0, sizeof(float) * MAX_NEURON_MON_GRP_SZIE * 1000 * managerRTDSize.maxNumGroups);
	memset(managerRuntimeData.nIBuffer, 0, sizeof(float) * MAX_NEURON_MON_GRP_SZIE * 1000 * managerRTDSize.maxNumGroups);

	// Excitatory conductances (AMPA, NMDA rise/decay/total).
	managerRuntimeData.gAMPA = new float[managerRTDSize.glbNumNReg]; // sufficient to hold all regular neurons in the global network
	managerRuntimeData.gNMDA_r = new float[managerRTDSize.glbNumNReg]; // sufficient to hold all regular neurons in the global network
	managerRuntimeData.gNMDA_d = new float[managerRTDSize.glbNumNReg]; // sufficient to hold all regular neurons in the global network
	managerRuntimeData.gNMDA = new float[managerRTDSize.glbNumNReg]; // sufficient to hold all regular neurons in the global network
	memset(managerRuntimeData.gAMPA, 0, sizeof(float) * managerRTDSize.glbNumNReg);
	memset(managerRuntimeData.gNMDA_r, 0, sizeof(float) * managerRTDSize.glbNumNReg);
	memset(managerRuntimeData.gNMDA_d, 0, sizeof(float) * managerRTDSize.glbNumNReg);
	memset(managerRuntimeData.gNMDA, 0, sizeof(float) * managerRTDSize.glbNumNReg);

	// Inhibitory conductances (GABAa, GABAb rise/decay/total).
	managerRuntimeData.gGABAa = new float[managerRTDSize.glbNumNReg]; // sufficient to hold all regular neurons in the global network
	managerRuntimeData.gGABAb_r = new float[managerRTDSize.glbNumNReg]; // sufficient to hold all regular neurons in the global network
	managerRuntimeData.gGABAb_d = new float[managerRTDSize.glbNumNReg]; // sufficient to hold all regular neurons in the global network
	managerRuntimeData.gGABAb = new float[managerRTDSize.glbNumNReg]; // sufficient to hold all regular neurons in the global network
	memset(managerRuntimeData.gGABAa, 0, sizeof(float) * managerRTDSize.glbNumNReg);
	memset(managerRuntimeData.gGABAb_r, 0, sizeof(float) * managerRTDSize.glbNumNReg);
	memset(managerRuntimeData.gGABAb_d, 0, sizeof(float) * managerRTDSize.glbNumNReg);
	memset(managerRuntimeData.gGABAb, 0, sizeof(float) * managerRTDSize.glbNumNReg);

	// allocate neuromodulators and their assistive buffers
	managerRuntimeData.grpDA = new float[managerRTDSize.maxNumGroups];
	managerRuntimeData.grp5HT = new float[managerRTDSize.maxNumGroups];
	managerRuntimeData.grpACh = new float[managerRTDSize.maxNumGroups];
	managerRuntimeData.grpNE = new float[managerRTDSize.maxNumGroups];
	memset(managerRuntimeData.grpDA, 0, sizeof(float) * managerRTDSize.maxNumGroups);
	memset(managerRuntimeData.grp5HT, 0, sizeof(float) * managerRTDSize.maxNumGroups);
	memset(managerRuntimeData.grpACh, 0, sizeof(float) * managerRTDSize.maxNumGroups);
	memset(managerRuntimeData.grpNE, 0, sizeof(float) * managerRTDSize.maxNumGroups);


	// Neuromodulator history: 1000 samples (1 s) per group.
	managerRuntimeData.grpDABuffer = new float[managerRTDSize.maxNumGroups * 1000]; // 1 second DA buffer
	managerRuntimeData.grp5HTBuffer = new float[managerRTDSize.maxNumGroups * 1000];
	managerRuntimeData.grpAChBuffer = new float[managerRTDSize.maxNumGroups * 1000];
	managerRuntimeData.grpNEBuffer = new float[managerRTDSize.maxNumGroups * 1000];
	memset(managerRuntimeData.grpDABuffer, 0, managerRTDSize.maxNumGroups * sizeof(float) * 1000);
	memset(managerRuntimeData.grp5HTBuffer, 0, managerRTDSize.maxNumGroups * sizeof(float) * 1000);
	memset(managerRuntimeData.grpAChBuffer, 0, managerRTDSize.maxNumGroups * sizeof(float) * 1000);
	memset(managerRuntimeData.grpNEBuffer, 0, managerRTDSize.maxNumGroups * sizeof(float) * 1000);

	// Per-neuron spike timing, sized for all assigned neurons (incl. external).
	managerRuntimeData.lastSpikeTime = new int[managerRTDSize.maxNumNAssigned];
	memset(managerRuntimeData.lastSpikeTime, 0, sizeof(int) * managerRTDSize.maxNumNAssigned);

	managerRuntimeData.nSpikeCnt = new int[managerRTDSize.glbNumN];
	memset(managerRuntimeData.nSpikeCnt, 0, sizeof(int) * managerRTDSize.glbNumN); // sufficient to hold all neurons in the global network

	// Homeostasis-related firing-rate buffers.
	managerRuntimeData.avgFiring = new float[managerRTDSize.maxNumN];
	managerRuntimeData.baseFiring = new float[managerRTDSize.maxNumN];
	memset(managerRuntimeData.avgFiring, 0, sizeof(float) * managerRTDSize.maxNumN);
	memset(managerRuntimeData.baseFiring, 0, sizeof(float) * managerRTDSize.maxNumN);

	// STP can be applied to spike generators, too -> numN
	// \TODO: The size of these data structures could be reduced to the max synaptic delay of all
	// connections with STP. That number might not be the same as maxDelay_.
	managerRuntimeData.stpu = new float[managerRTDSize.maxNumN * (glbNetworkConfig.maxDelay + 1)];
	managerRuntimeData.stpx = new float[managerRTDSize.maxNumN * (glbNetworkConfig.maxDelay + 1)];
	memset(managerRuntimeData.stpu, 0, sizeof(float) * managerRTDSize.maxNumN * (glbNetworkConfig.maxDelay + 1));
	memset(managerRuntimeData.stpx, 0, sizeof(float) * managerRTDSize.maxNumN * (glbNetworkConfig.maxDelay + 1));

	// Connectivity bookkeeping: per-neuron synapse counts and prefix offsets
	// into the flat pre-/post-synaptic arrays below.
	managerRuntimeData.Npre = new unsigned short[managerRTDSize.maxNumNAssigned];
	managerRuntimeData.Npre_plastic = new unsigned short[managerRTDSize.maxNumNAssigned];
	managerRuntimeData.Npost = new unsigned short[managerRTDSize.maxNumNAssigned];
	managerRuntimeData.cumulativePost = new unsigned int[managerRTDSize.maxNumNAssigned];
	managerRuntimeData.cumulativePre = new unsigned int[managerRTDSize.maxNumNAssigned];
	// sizeof(short)/sizeof(int) match the unsigned element types byte-for-byte
	memset(managerRuntimeData.Npre, 0, sizeof(short) * managerRTDSize.maxNumNAssigned);
	memset(managerRuntimeData.Npre_plastic, 0, sizeof(short) * managerRTDSize.maxNumNAssigned);
	memset(managerRuntimeData.Npost, 0, sizeof(short) * managerRTDSize.maxNumNAssigned);
	memset(managerRuntimeData.cumulativePost, 0, sizeof(int) * managerRTDSize.maxNumNAssigned);
	memset(managerRuntimeData.cumulativePre, 0, sizeof(int) * managerRTDSize.maxNumNAssigned);

	managerRuntimeData.postSynapticIds = new SynInfo[managerRTDSize.maxNumPostSynNet];
	managerRuntimeData.postDelayInfo = new DelayInfo[managerRTDSize.maxNumNAssigned * (glbNetworkConfig.maxDelay + 1)];
	memset(managerRuntimeData.postSynapticIds, 0, sizeof(SynInfo) * managerRTDSize.maxNumPostSynNet);
	memset(managerRuntimeData.postDelayInfo, 0, sizeof(DelayInfo) * managerRTDSize.maxNumNAssigned * (glbNetworkConfig.maxDelay + 1));

	managerRuntimeData.preSynapticIds = new SynInfo[managerRTDSize.maxNumPreSynNet];
	memset(managerRuntimeData.preSynapticIds, 0, sizeof(SynInfo) * managerRTDSize.maxNumPreSynNet);

	// Per-synapse weights, plasticity deltas, bounds, and spike times.
	managerRuntimeData.wt = new float[managerRTDSize.maxNumPreSynNet];
	managerRuntimeData.wtChange = new float[managerRTDSize.maxNumPreSynNet];
	managerRuntimeData.maxSynWt = new float[managerRTDSize.maxNumPreSynNet];
	managerRuntimeData.synSpikeTime = new int[managerRTDSize.maxNumPreSynNet];
	memset(managerRuntimeData.wt, 0, sizeof(float) * managerRTDSize.maxNumPreSynNet);
	memset(managerRuntimeData.wtChange, 0, sizeof(float) * managerRTDSize.maxNumPreSynNet);
	memset(managerRuntimeData.maxSynWt, 0, sizeof(float) * managerRTDSize.maxNumPreSynNet);
	memset(managerRuntimeData.synSpikeTime, 0, sizeof(int) * managerRTDSize.maxNumPreSynNet);

	// Per-connection fast/slow synaptic scaling factors (members of SNN, not
	// of managerRuntimeData).
	mulSynFast = new float[managerRTDSize.maxNumConnections];
	mulSynSlow = new float[managerRTDSize.maxNumConnections];
	memset(mulSynFast, 0, sizeof(float) * managerRTDSize.maxNumConnections);
	memset(mulSynSlow, 0, sizeof(float) * managerRTDSize.maxNumConnections);

	managerRuntimeData.connIdsPreIdx = new short int[managerRTDSize.maxNumPreSynNet];
	memset(managerRuntimeData.connIdsPreIdx, 0, sizeof(short int) * managerRTDSize.maxNumPreSynNet);

	managerRuntimeData.grpIds = new short int[managerRTDSize.maxNumNAssigned];
	memset(managerRuntimeData.grpIds, 0, sizeof(short int) * managerRTDSize.maxNumNAssigned);

	// One bit per spike-generator neuron, rounded up to whole 32-bit words.
	// NOTE(review): this is the only buffer here that is not zeroed —
	// presumably it is cleared before each use; confirm at the call sites.
	managerRuntimeData.spikeGenBits = new unsigned int[managerRTDSize.maxNumNSpikeGen / 32 + 1];

	// Confirm allocation of SNN runtime data in main memory
	managerRuntimeData.allocated = true;
	managerRuntimeData.memType = CPU_MEM;
}
2934 
2935 int SNN::assignGroup(int gGrpId, int availableNeuronId) {
2936  int newAvailableNeuronId;
2937  assert(groupConfigMDMap[gGrpId].gStartN == -1); // The group has not yet been assigned
2938  groupConfigMDMap[gGrpId].gStartN = availableNeuronId;
2939  groupConfigMDMap[gGrpId].gEndN = availableNeuronId + groupConfigMap[gGrpId].numN - 1;
2940 
2941  KERNEL_DEBUG("Allocation for %d(%s), St=%d, End=%d",
2942  gGrpId, groupConfigMap[gGrpId].grpName.c_str(), groupConfigMDMap[gGrpId].gStartN, groupConfigMDMap[gGrpId].gEndN);
2943 
2944  newAvailableNeuronId = availableNeuronId + groupConfigMap[gGrpId].numN;
2945  //assert(newAvailableNeuronId <= numN);
2946 
2947  return newAvailableNeuronId;
2948 }
2949 
2950 int SNN::assignGroup(std::list<GroupConfigMD>::iterator grpIt, int localGroupId, int availableNeuronId) {
2951  int newAvailableNeuronId;
2952  assert(grpIt->lGrpId == -1); // The group has not yet been assigned
2953  grpIt->lGrpId = localGroupId;
2954  grpIt->lStartN = availableNeuronId;
2955  grpIt->lEndN = availableNeuronId + groupConfigMap[grpIt->gGrpId].numN - 1;
2956 
2957  grpIt->LtoGOffset = grpIt->gStartN - grpIt->lStartN;
2958  grpIt->GtoLOffset = grpIt->lStartN - grpIt->gStartN;
2959 
2960  KERNEL_DEBUG("Allocation for group (%s) [id:%d, local id:%d], St=%d, End=%d", groupConfigMap[grpIt->gGrpId].grpName.c_str(),
2961  grpIt->gGrpId, grpIt->lGrpId, grpIt->lStartN, grpIt->lEndN);
2962 
2963  newAvailableNeuronId = availableNeuronId + groupConfigMap[grpIt->gGrpId].numN;
2964 
2965  return newAvailableNeuronId;
2966 }
2967 
2968 void SNN::generateGroupRuntime(int netId, int lGrpId) {
2969  resetNeuromodulator(netId, lGrpId);
2970 
2971  for(int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++)
2972  resetNeuron(netId, lGrpId, lNId);
2973 }
2974 
2975 void SNN::generateRuntimeGroupConfigs() {
2976  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
2977  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
2978  // publish the group configs in an array for quick access and accessible on GPUs (cuda doesn't support std::list)
2979  int gGrpId = grpIt->gGrpId;
2980  int lGrpId = grpIt->lGrpId;
2981 
2982  // Data published by groupConfigMDMap[] are generated in compileSNN() and are invariant in partitionSNN()
2983  // Data published by grpIt are generated in partitionSNN() and maybe have duplicated copys
2984  groupConfigs[netId][lGrpId].netId = grpIt->netId;
2985  groupConfigs[netId][lGrpId].gGrpId = grpIt->gGrpId;
2986  groupConfigs[netId][lGrpId].gStartN = grpIt->gStartN;
2987  groupConfigs[netId][lGrpId].gEndN = grpIt->gEndN;
2988  groupConfigs[netId][lGrpId].lGrpId = grpIt->lGrpId;
2989  groupConfigs[netId][lGrpId].lStartN = grpIt->lStartN;
2990  groupConfigs[netId][lGrpId].lEndN = grpIt->lEndN;
2991  groupConfigs[netId][lGrpId].LtoGOffset = grpIt->LtoGOffset;
2992  groupConfigs[netId][lGrpId].GtoLOffset = grpIt->GtoLOffset;
2993  groupConfigs[netId][lGrpId].Type = groupConfigMap[gGrpId].type;
2994  groupConfigs[netId][lGrpId].numN = groupConfigMap[gGrpId].numN;
2995  groupConfigs[netId][lGrpId].numPostSynapses = grpIt->numPostSynapses;
2996  groupConfigs[netId][lGrpId].numPreSynapses = grpIt->numPreSynapses;
2997  groupConfigs[netId][lGrpId].isSpikeGenerator = groupConfigMap[gGrpId].isSpikeGenerator;
2998  groupConfigs[netId][lGrpId].isSpikeGenFunc = groupConfigMap[gGrpId].spikeGenFunc != NULL ? true : false;
2999  groupConfigs[netId][lGrpId].WithSTP = groupConfigMap[gGrpId].stpConfig.WithSTP;
3000  groupConfigs[netId][lGrpId].WithSTDP = groupConfigMap[gGrpId].stdpConfig.WithSTDP;
3001  groupConfigs[netId][lGrpId].WithESTDP = groupConfigMap[gGrpId].stdpConfig.WithESTDP;
3002  groupConfigs[netId][lGrpId].WithISTDP = groupConfigMap[gGrpId].stdpConfig.WithISTDP;
3003  groupConfigs[netId][lGrpId].WithESTDPtype = groupConfigMap[gGrpId].stdpConfig.WithESTDPtype;
3004  groupConfigs[netId][lGrpId].WithISTDPtype = groupConfigMap[gGrpId].stdpConfig.WithISTDPtype;
3005  groupConfigs[netId][lGrpId].WithESTDPcurve = groupConfigMap[gGrpId].stdpConfig.WithESTDPcurve;
3006  groupConfigs[netId][lGrpId].WithISTDPcurve = groupConfigMap[gGrpId].stdpConfig.WithISTDPcurve;
3007  groupConfigs[netId][lGrpId].WithHomeostasis = groupConfigMap[gGrpId].homeoConfig.WithHomeostasis;
3008  groupConfigs[netId][lGrpId].FixedInputWts = grpIt->fixedInputWts;
3009  groupConfigs[netId][lGrpId].hasExternalConnect = grpIt->hasExternalConnect;
3010  groupConfigs[netId][lGrpId].Noffset = grpIt->Noffset; // Note: Noffset is not valid at this time
3011  groupConfigs[netId][lGrpId].MaxDelay = grpIt->maxOutgoingDelay;
3012  groupConfigs[netId][lGrpId].STP_A = groupConfigMap[gGrpId].stpConfig.STP_A;
3013  groupConfigs[netId][lGrpId].STP_U = groupConfigMap[gGrpId].stpConfig.STP_U;
3014  groupConfigs[netId][lGrpId].STP_tau_u_inv = groupConfigMap[gGrpId].stpConfig.STP_tau_u_inv;
3015  groupConfigs[netId][lGrpId].STP_tau_x_inv = groupConfigMap[gGrpId].stpConfig.STP_tau_x_inv;
3016  groupConfigs[netId][lGrpId].TAU_PLUS_INV_EXC = groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_EXC;
3017  groupConfigs[netId][lGrpId].TAU_MINUS_INV_EXC = groupConfigMap[gGrpId].stdpConfig.TAU_MINUS_INV_EXC;
3018  groupConfigs[netId][lGrpId].ALPHA_PLUS_EXC = groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_EXC;
3019  groupConfigs[netId][lGrpId].ALPHA_MINUS_EXC = groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_EXC;
3020  groupConfigs[netId][lGrpId].GAMMA = groupConfigMap[gGrpId].stdpConfig.GAMMA;
3021  groupConfigs[netId][lGrpId].KAPPA = groupConfigMap[gGrpId].stdpConfig.KAPPA;
3022  groupConfigs[netId][lGrpId].OMEGA = groupConfigMap[gGrpId].stdpConfig.OMEGA;
3023  groupConfigs[netId][lGrpId].TAU_PLUS_INV_INB = groupConfigMap[gGrpId].stdpConfig.TAU_PLUS_INV_INB;
3024  groupConfigs[netId][lGrpId].TAU_MINUS_INV_INB = groupConfigMap[gGrpId].stdpConfig.TAU_MINUS_INV_INB;
3025  groupConfigs[netId][lGrpId].ALPHA_PLUS_INB = groupConfigMap[gGrpId].stdpConfig.ALPHA_PLUS_INB;
3026  groupConfigs[netId][lGrpId].ALPHA_MINUS_INB = groupConfigMap[gGrpId].stdpConfig.ALPHA_MINUS_INB;
3027  groupConfigs[netId][lGrpId].BETA_LTP = groupConfigMap[gGrpId].stdpConfig.BETA_LTP;
3028  groupConfigs[netId][lGrpId].BETA_LTD = groupConfigMap[gGrpId].stdpConfig.BETA_LTD;
3029  groupConfigs[netId][lGrpId].LAMBDA = groupConfigMap[gGrpId].stdpConfig.LAMBDA;
3030  groupConfigs[netId][lGrpId].DELTA = groupConfigMap[gGrpId].stdpConfig.DELTA;
3031 
3032  groupConfigs[netId][lGrpId].numCompNeighbors = 0;
3033  groupConfigs[netId][lGrpId].withCompartments = groupConfigMap[gGrpId].withCompartments;
3034  groupConfigs[netId][lGrpId].compCouplingUp = groupConfigMap[gGrpId].compCouplingUp;
3035  groupConfigs[netId][lGrpId].compCouplingDown = groupConfigMap[gGrpId].compCouplingDown;
3036  memset(&groupConfigs[netId][lGrpId].compNeighbors, 0, sizeof(groupConfigs[netId][lGrpId].compNeighbors[0])*MAX_NUM_COMP_CONN);
3037  memset(&groupConfigs[netId][lGrpId].compCoupling, 0, sizeof(groupConfigs[netId][lGrpId].compCoupling[0])*MAX_NUM_COMP_CONN);
3038 
3040  groupConfigs[netId][lGrpId].avgTimeScale = groupConfigMap[gGrpId].homeoConfig.avgTimeScale;
3041  groupConfigs[netId][lGrpId].avgTimeScale_decay = groupConfigMap[gGrpId].homeoConfig.avgTimeScaleDecay;
3042  groupConfigs[netId][lGrpId].avgTimeScaleInv = groupConfigMap[gGrpId].homeoConfig.avgTimeScaleInv;
3043  groupConfigs[netId][lGrpId].homeostasisScale = groupConfigMap[gGrpId].homeoConfig.homeostasisScale;
3044 
3045  // parameters of neuromodulator
3046  groupConfigs[netId][lGrpId].baseDP = groupConfigMap[gGrpId].neuromodulatorConfig.baseDP;
3047  groupConfigs[netId][lGrpId].base5HT = groupConfigMap[gGrpId].neuromodulatorConfig.base5HT;
3048  groupConfigs[netId][lGrpId].baseACh = groupConfigMap[gGrpId].neuromodulatorConfig.baseACh;
3049  groupConfigs[netId][lGrpId].baseNE = groupConfigMap[gGrpId].neuromodulatorConfig.baseNE;
3050  groupConfigs[netId][lGrpId].decayDP = groupConfigMap[gGrpId].neuromodulatorConfig.decayDP;
3051  groupConfigs[netId][lGrpId].decay5HT = groupConfigMap[gGrpId].neuromodulatorConfig.decay5HT;
3052  groupConfigs[netId][lGrpId].decayACh = groupConfigMap[gGrpId].neuromodulatorConfig.decayACh;
3053  groupConfigs[netId][lGrpId].decayNE = groupConfigMap[gGrpId].neuromodulatorConfig.decayNE;
3054 
3055  // sync groupConfigs[][] and groupConfigMDMap[]
3056  if (netId == grpIt->netId) {
3057  groupConfigMDMap[gGrpId].netId = grpIt->netId;
3058  groupConfigMDMap[gGrpId].gGrpId = grpIt->gGrpId;
3059  groupConfigMDMap[gGrpId].gStartN = grpIt->gStartN;
3060  groupConfigMDMap[gGrpId].gEndN = grpIt->gEndN;
3061  groupConfigMDMap[gGrpId].lGrpId = grpIt->lGrpId;
3062  groupConfigMDMap[gGrpId].lStartN = grpIt->lStartN;
3063  groupConfigMDMap[gGrpId].lEndN = grpIt->lEndN;
3064  groupConfigMDMap[gGrpId].numPostSynapses = grpIt->numPostSynapses;
3065  groupConfigMDMap[gGrpId].numPreSynapses = grpIt->numPreSynapses;
3066  groupConfigMDMap[gGrpId].LtoGOffset = grpIt->LtoGOffset;
3067  groupConfigMDMap[gGrpId].GtoLOffset = grpIt->GtoLOffset;
3068  groupConfigMDMap[gGrpId].fixedInputWts = grpIt->fixedInputWts;
3069  groupConfigMDMap[gGrpId].hasExternalConnect = grpIt->hasExternalConnect;
3070  groupConfigMDMap[gGrpId].Noffset = grpIt->Noffset; // Note: Noffset is not valid at this time
3071  groupConfigMDMap[gGrpId].maxOutgoingDelay = grpIt->maxOutgoingDelay;
3072  }
3073  groupConfigs[netId][lGrpId].withParamModel_9 = groupConfigMap[gGrpId].withParamModel_9;
3074  groupConfigs[netId][lGrpId].isLIF = groupConfigMap[gGrpId].isLIF;
3075 
3076  }
3077 
3078  // FIXME: How does networkConfigs[netId].numGroups be availabe at this time?! Bug?!
3079  //int numNSpikeGen = 0;
3080  //for(int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
3081  // if (netId == groupConfigs[netId][lGrpId].netId && groupConfigs[netId][lGrpId].isSpikeGenerator && groupConfigs[netId][lGrpId].isSpikeGenFunc) {
3082  // // we only need numNSpikeGen for spike generator callbacks that need to transfer their spikes to the GPU
3083  // groupConfigs[netId][lGrpId].Noffset = numNSpikeGen; // FIXME, Noffset is updated after publish group configs
3084  // numNSpikeGen += groupConfigs[netId][lGrpId].numN;
3085  // }
3086  //}
3087  //assert(numNSpikeGen <= networkConfigs[netId].numNPois);
3088  }
3089 }
3090 
3091 void SNN::generateRuntimeConnectConfigs() {
3092  // sync localConnectLists and connectConfigMap
3093  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
3094  for (std::list<ConnectConfig>::iterator connIt = localConnectLists[netId].begin(); connIt != localConnectLists[netId].end(); connIt++) {
3095  connectConfigMap[connIt->connId] = *connIt;
3096  }
3097 
3098  for (std::list<ConnectConfig>::iterator connIt = externalConnectLists[netId].begin(); connIt != externalConnectLists[netId].end(); connIt++) {
3099  connectConfigMap[connIt->connId] = *connIt;
3100  }
3101  }
3102 }
3103 
// Builds networkConfigs[netId] for every local network that has at least one
// group assigned: copies the global network settings and simulator-wide
// feature flags, derives per-network neuron/group/connection/synapse counts,
// and finally sizes managerRTDSize so the manager runtime buffers can hold
// the data of any single local network.
void SNN::generateRuntimeNetworkConfigs() {
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			// copy the global network config to local network configs
			// global configuration for maximum axonal delay
			networkConfigs[netId].maxDelay = glbNetworkConfig.maxDelay;

			// configurations for execution features
			networkConfigs[netId].sim_with_fixedwts = sim_with_fixedwts;
			networkConfigs[netId].sim_with_conductances = sim_with_conductances;
			networkConfigs[netId].sim_with_homeostasis = sim_with_homeostasis;
			networkConfigs[netId].sim_with_stdp = sim_with_stdp;
			networkConfigs[netId].sim_with_stp = sim_with_stp;
			networkConfigs[netId].sim_in_testing = sim_in_testing;

			// search for active neuron monitor; sim_with_nm is set if any group
			// assigned to this network has a neuron monitor attached
			networkConfigs[netId].sim_with_nm = false;
			for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
				if (grpIt->netId == netId && grpIt->neuronMonitorId >= 0)
					networkConfigs[netId].sim_with_nm = true;
			}

			// stdp, da-stdp configurations
			networkConfigs[netId].stdpScaleFactor = stdpScaleFactor_;
			networkConfigs[netId].wtChangeDecay = wtChangeDecay_;

			// conductance configurations (decay/rise constants per receptor type)
			networkConfigs[netId].sim_with_NMDA_rise = sim_with_NMDA_rise;
			networkConfigs[netId].sim_with_GABAb_rise = sim_with_GABAb_rise;
			networkConfigs[netId].dAMPA = dAMPA;
			networkConfigs[netId].rNMDA = rNMDA;
			networkConfigs[netId].dNMDA = dNMDA;
			networkConfigs[netId].sNMDA = sNMDA;
			networkConfigs[netId].dGABAa = dGABAa;
			networkConfigs[netId].rGABAb = rGABAb;
			networkConfigs[netId].dGABAb = dGABAb;
			networkConfigs[netId].sGABAb = sGABAb;

			networkConfigs[netId].simIntegrationMethod = glbNetworkConfig.simIntegrationMethod;
			networkConfigs[netId].simNumStepsPerMs = glbNetworkConfig.simNumStepsPerMs;
			networkConfigs[netId].timeStep = glbNetworkConfig.timeStep;

			// configurations for boundaries of neural types
			findNumN(netId, networkConfigs[netId].numN, networkConfigs[netId].numNExternal, networkConfigs[netId].numNAssigned,
				networkConfigs[netId].numNReg, networkConfigs[netId].numNExcReg, networkConfigs[netId].numNInhReg,
				networkConfigs[netId].numNPois, networkConfigs[netId].numNExcPois, networkConfigs[netId].numNInhPois);

			// configurations for assigned groups and connections
			// numGroups counts only groups owned by this network; numGroupsAssigned
			// also includes groups assigned here on behalf of external connections
			networkConfigs[netId].numGroups = 0;
			for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
				if (grpIt->netId == netId)
					networkConfigs[netId].numGroups++;
			}
			networkConfigs[netId].numGroupsAssigned = groupPartitionLists[netId].size();
			//networkConfigs[netId].numConnections = localConnectLists[netId].size();
			//networkConfigs[netId].numAssignedConnections = localConnectLists[netId].size() + externalConnectLists[netId].size();
			//networkConfigs[netId].numConnections = localConnectLists[netId].size() + externalConnectLists[netId].size();
			networkConfigs[netId].numConnections = connectConfigMap.size();// temporary solution: copy all connection info to each GPU

			// find the maximum number of pre- and post-connections among neurons
			// SNN::maxNumPreSynN and SNN::maxNumPostSynN are updated
			findMaxNumSynapsesNeurons(netId, networkConfigs[netId].maxNumPostSynN, networkConfigs[netId].maxNumPreSynN);

			// find the maximum number of spikes in D1 (i.e., maxDelay == 1) and D2 (i.e., maxDelay >= 2) sets
			findMaxSpikesD1D2(netId, networkConfigs[netId].maxSpikesD1, networkConfigs[netId].maxSpikesD2);

			// find the total number of synapses in the network
			findNumSynapsesNetwork(netId, networkConfigs[netId].numPostSynNet, networkConfigs[netId].numPreSynNet);

			// find out number of user-defined spike gen and update Noffset of each group config
			// Note: groupConfigs[][].Noffset is valid at this time
			findNumNSpikeGenAndOffset(netId);
		}
	}

	// find manager runtime data size, which is sufficient to hold the data of any gpu runtime
	memset(&managerRTDSize, 0, sizeof(ManagerRuntimeDataSize));
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			// find the maximum number of numN, numNReg ,and numNAssigned among local networks
			if (networkConfigs[netId].numNReg > managerRTDSize.maxNumNReg) managerRTDSize.maxNumNReg = networkConfigs[netId].numNReg;
			if (networkConfigs[netId].numN > managerRTDSize.maxNumN) managerRTDSize.maxNumN = networkConfigs[netId].numN;
			if (networkConfigs[netId].numNAssigned > managerRTDSize.maxNumNAssigned) managerRTDSize.maxNumNAssigned = networkConfigs[netId].numNAssigned;

			// find the maximum number of numNSpikeGen among local networks
			if (networkConfigs[netId].numNSpikeGen > managerRTDSize.maxNumNSpikeGen) managerRTDSize.maxNumNSpikeGen = networkConfigs[netId].numNSpikeGen;

			// find the maximum number of numGroups and numConnections among local networks
			if (networkConfigs[netId].numGroups > managerRTDSize.maxNumGroups) managerRTDSize.maxNumGroups = networkConfigs[netId].numGroups;
			if (networkConfigs[netId].numConnections > managerRTDSize.maxNumConnections) managerRTDSize.maxNumConnections = networkConfigs[netId].numConnections;

			// find the maximum number of neurons in a group among local networks
			for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
				if (groupConfigMap[grpIt->gGrpId].numN > managerRTDSize.maxNumNPerGroup) managerRTDSize.maxNumNPerGroup = groupConfigMap[grpIt->gGrpId].numN;
			}

			// find the maximum number of maxSpikesD1(D2) among networks
			if (networkConfigs[netId].maxSpikesD1 > managerRTDSize.maxMaxSpikeD1) managerRTDSize.maxMaxSpikeD1 = networkConfigs[netId].maxSpikesD1;
			if (networkConfigs[netId].maxSpikesD2 > managerRTDSize.maxMaxSpikeD2) managerRTDSize.maxMaxSpikeD2 = networkConfigs[netId].maxSpikesD2;

			// find the maximum number of total # of pre- and post-connections among local networks
			if (networkConfigs[netId].numPreSynNet > managerRTDSize.maxNumPreSynNet) managerRTDSize.maxNumPreSynNet = networkConfigs[netId].numPreSynNet;
			if (networkConfigs[netId].numPostSynNet > managerRTDSize.maxNumPostSynNet) managerRTDSize.maxNumPostSynNet = networkConfigs[netId].numPostSynNet;

			// find the number of numN, and numNReg in the global network
			managerRTDSize.glbNumN += networkConfigs[netId].numN;
			managerRTDSize.glbNumNReg += networkConfigs[netId].numNReg;
		}
	}
}
3214 
3215 bool compareSrcNeuron(const ConnectionInfo& first, const ConnectionInfo& second) {
3216  return (first.nSrc + first.srcGLoffset < second.nSrc + second.srcGLoffset);
3217 }
3218 
3219 bool compareDelay(const ConnectionInfo& first, const ConnectionInfo& second) {
3220  return (first.delay < second.delay);
3221 }
3222 
3223 // Note: ConnectInfo stored in connectionList use global ids
3224 void SNN::generateConnectionRuntime(int netId) {
3225  std::map<int, int> GLoffset; // global nId to local nId offset
3226  std::map<int, int> GLgrpId; // global grpId to local grpId offset
3227 
3228  // load offset between global neuron id and local neuron id
3229  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
3230  GLoffset[grpIt->gGrpId] = grpIt->GtoLOffset;
3231  GLgrpId[grpIt->gGrpId] = grpIt->lGrpId;
3232  }
3233  // FIXME: connId is global connId, use connectConfigs[netId][local connId] instead,
3234  // FIXME; but note connectConfigs[netId][] are NOT complete, lack of exeternal incoming connections
3235  // generate mulSynFast, mulSynSlow in connection-centric array
3236  for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
3237  // store scaling factors for synaptic currents in connection-centric array
3238  mulSynFast[connIt->second.connId] = connIt->second.mulSynFast;
3239  mulSynSlow[connIt->second.connId] = connIt->second.mulSynSlow;
3240  }
3241 
3242  // parse ConnectionInfo stored in connectionLists[0]
3243  // note: ConnectInfo stored in connectionList use global ids
3244  // generate Npost, Npre, Npre_plastic
3245  int parsedConnections = 0;
3246  memset(managerRuntimeData.Npost, 0, sizeof(short) * networkConfigs[netId].numNAssigned);
3247  memset(managerRuntimeData.Npre, 0, sizeof(short) * networkConfigs[netId].numNAssigned);
3248  memset(managerRuntimeData.Npre_plastic, 0, sizeof(short) * networkConfigs[netId].numNAssigned);
3249  for (std::list<ConnectionInfo>::iterator connIt = connectionLists[netId].begin(); connIt != connectionLists[netId].end(); connIt++) {
3250  connIt->srcGLoffset = GLoffset[connIt->grpSrc];
3251  if (managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]] == SYNAPSE_ID_MASK) {
3252  KERNEL_ERROR("Error: the number of synapses exceeds maximum limit (%d) for neuron %d (group %d)", SYNAPSE_ID_MASK, connIt->nSrc, connIt->grpSrc);
3254  }
3255  if (managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]] == SYNAPSE_ID_MASK) {
3256  KERNEL_ERROR("Error: the number of synapses exceeds maximum limit (%d) for neuron %d (group %d)", SYNAPSE_ID_MASK, connIt->nDest, connIt->grpDest);
3258  }
3259  managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]]++;
3260  managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]]++;
3261 
3262  if (GET_FIXED_PLASTIC(connectConfigMap[connIt->connId].connProp) == SYN_PLASTIC) {
3263  sim_with_fixedwts = false; // if network has any plastic synapses at all, this will be set to true
3264  managerRuntimeData.Npre_plastic[connIt->nDest + GLoffset[connIt->grpDest]]++;
3265 
3266  // homeostasis
3267  if (groupConfigMap[connIt->grpDest].homeoConfig.WithHomeostasis && groupConfigMDMap[connIt->grpDest].homeoId == -1)
3268  groupConfigMDMap[connIt->grpDest].homeoId = connIt->nDest + GLoffset[connIt->grpDest]; // this neuron info will be printed
3269 
3270  // old access to homeostasis
3271  //if (groupConfigs[netId][GLgrpId[it->grpDest]].WithHomeostasis && groupConfigs[netId][GLgrpId[it->grpDest]].homeoId == -1)
3272  // groupConfigs[netId][GLgrpId[it->grpDest]].homeoId = it->nDest + GLoffset[it->grpDest]; // this neuron info will be printed
3273  }
3274 
3275  // generate the delay vaule
3276  //it->delay = connectConfigMap[it->connId].minDelay + rand() % (connectConfigMap[it->connId].maxDelay - connectConfigMap[it->connId].minDelay + 1);
3277  //assert((it->delay >= connectConfigMap[it->connId].minDelay) && (it->delay <= connectConfigMap[it->connId].maxDelay));
3278  // generate the max weight and initial weight
3279  //float initWt = generateWeight(connectConfigMap[it->connId].connProp, connectConfigMap[it->connId].initWt, connectConfigMap[it->connId].maxWt, it->nSrc, it->grpSrc);
3280  //float initWt = connectConfigMap[it->connId].initWt;
3281  //float maxWt = connectConfigMap[it->connId].maxWt;
3282  // adjust sign of weight based on pre-group (negative if pre is inhibitory)
3283  // this access is fine, isExcitatoryGroup() use global grpId
3284  //it->maxWt = isExcitatoryGroup(it->grpSrc) ? fabs(maxWt) : -1.0 * fabs(maxWt);
3285  //it->initWt = isExcitatoryGroup(it->grpSrc) ? fabs(initWt) : -1.0 * fabs(initWt);
3286 
3287  parsedConnections++;
3288  }
3289  assert(parsedConnections == networkConfigs[netId].numPostSynNet && parsedConnections == networkConfigs[netId].numPreSynNet);
3290 
3291  // generate cumulativePost and cumulativePre
3292  managerRuntimeData.cumulativePost[0] = 0;
3293  managerRuntimeData.cumulativePre[0] = 0;
3294  for (int lNId = 1; lNId < networkConfigs[netId].numNAssigned; lNId++) {
3295  managerRuntimeData.cumulativePost[lNId] = managerRuntimeData.cumulativePost[lNId - 1] + managerRuntimeData.Npost[lNId - 1];
3296  managerRuntimeData.cumulativePre[lNId] = managerRuntimeData.cumulativePre[lNId - 1] + managerRuntimeData.Npre[lNId - 1];
3297  }
3298 
3299  // generate preSynapticIds, parse plastic connections first
3300  memset(managerRuntimeData.Npre, 0, sizeof(short) * networkConfigs[netId].numNAssigned); // reset managerRuntimeData.Npre to zero, so that it can be used as synId
3301  parsedConnections = 0;
3302  for (std::list<ConnectionInfo>::iterator connIt = connectionLists[netId].begin(); connIt != connectionLists[netId].end(); connIt++) {
3303  if (GET_FIXED_PLASTIC(connectConfigMap[connIt->connId].connProp) == SYN_PLASTIC) {
3304  int pre_pos = managerRuntimeData.cumulativePre[connIt->nDest + GLoffset[connIt->grpDest]] + managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]];
3305  assert(pre_pos < networkConfigs[netId].numPreSynNet);
3306 
3307  managerRuntimeData.preSynapticIds[pre_pos] = SET_CONN_ID((connIt->nSrc + GLoffset[connIt->grpSrc]), 0, (GLgrpId[connIt->grpSrc])); // managerRuntimeData.Npost[it->nSrc] is not availabe at this parse
3308  connIt->preSynId = managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]]; // save managerRuntimeData.Npre[it->nDest] as synId
3309 
3310  managerRuntimeData.Npre[connIt->nDest+ GLoffset[connIt->grpDest]]++;
3311  parsedConnections++;
3312 
3313  // update the maximum number of and pre-connections of a neuron in a group
3314  //if (managerRuntimeData.Npre[it->nDest] > groupInfo[it->grpDest].maxPreConn)
3315  // groupInfo[it->grpDest].maxPreConn = managerRuntimeData.Npre[it->nDest];
3316  }
3317  }
3318  // parse fixed connections
3319  for (std::list<ConnectionInfo>::iterator connIt = connectionLists[netId].begin(); connIt != connectionLists[netId].end(); connIt++) {
3320  if (GET_FIXED_PLASTIC(connectConfigMap[connIt->connId].connProp) == SYN_FIXED) {
3321  int pre_pos = managerRuntimeData.cumulativePre[connIt->nDest + GLoffset[connIt->grpDest]] + managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]];
3322  assert(pre_pos < networkConfigs[netId].numPreSynNet);
3323 
3324  managerRuntimeData.preSynapticIds[pre_pos] = SET_CONN_ID((connIt->nSrc + GLoffset[connIt->grpSrc]), 0, (GLgrpId[connIt->grpSrc])); // managerRuntimeData.Npost[it->nSrc] is not availabe at this parse
3325  connIt->preSynId = managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]]; // save managerRuntimeData.Npre[it->nDest] as synId
3326 
3327  managerRuntimeData.Npre[connIt->nDest + GLoffset[connIt->grpDest]]++;
3328  parsedConnections++;
3329 
3330  // update the maximum number of and pre-connections of a neuron in a group
3331  //if (managerRuntimeData.Npre[it->nDest] > groupInfo[it->grpDest].maxPreConn)
3332  // groupInfo[it->grpDest].maxPreConn = managerRuntimeData.Npre[it->nDest];
3333  }
3334  }
3335  assert(parsedConnections == networkConfigs[netId].numPreSynNet);
3336  //printf("parsed pre connections %d\n", parsedConnections);
3337 
3338  // generate postSynapticIds
3339  connectionLists[netId].sort(compareSrcNeuron); // sort by local nSrc id
3340  memset(managerRuntimeData.postDelayInfo, 0, sizeof(DelayInfo) * (networkConfigs[netId].numNAssigned * (glbNetworkConfig.maxDelay + 1)));
3341  for (int lNId = 0; lNId < networkConfigs[netId].numNAssigned; lNId++) { // pre-neuron order, local nId
3342  if (managerRuntimeData.Npost[lNId] > 0) {
3343  std::list<ConnectionInfo> postConnectionList;
3344  ConnectionInfo targetConn;
3345  targetConn.nSrc = lNId ; // the other fields does not matter, use local nid to search
3346 
3347  std::list<ConnectionInfo>::iterator firstPostConn = std::find(connectionLists[netId].begin(), connectionLists[netId].end(), targetConn);
3348  std::list<ConnectionInfo>::iterator lastPostConn = firstPostConn;
3349  std::advance(lastPostConn, managerRuntimeData.Npost[lNId]);
3350  managerRuntimeData.Npost[lNId] = 0; // reset managerRuntimeData.Npost[lNId] to zero, so that it can be used as synId
3351 
3352  postConnectionList.splice(postConnectionList.begin(), connectionLists[netId], firstPostConn, lastPostConn);
3353  postConnectionList.sort(compareDelay);
3354 
3355  int post_pos, pre_pos, lastDelay = 0;
3356  parsedConnections = 0;
3357  //memset(&managerRuntimeData.postDelayInfo[lNId * (glbNetworkConfig.maxDelay + 1)], 0, sizeof(DelayInfo) * (glbNetworkConfig.maxDelay + 1));
3358  for (std::list<ConnectionInfo>::iterator connIt = postConnectionList.begin(); connIt != postConnectionList.end(); connIt++) {
3359  assert(connIt->nSrc + GLoffset[connIt->grpSrc] == lNId);
3360  post_pos = managerRuntimeData.cumulativePost[connIt->nSrc + GLoffset[connIt->grpSrc]] + managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]];
3361  pre_pos = managerRuntimeData.cumulativePre[connIt->nDest + GLoffset[connIt->grpDest]] + connIt->preSynId;
3362 
3363  assert(post_pos < networkConfigs[netId].numPostSynNet);
3364  //assert(pre_pos < numPreSynNet);
3365 
3366  // generate a post synaptic id for the current connection
3367  managerRuntimeData.postSynapticIds[post_pos] = SET_CONN_ID((connIt->nDest + GLoffset[connIt->grpDest]), connIt->preSynId, (GLgrpId[connIt->grpDest]));// used stored managerRuntimeData.Npre[it->nDest] in it->preSynId
3368  // generate a delay look up table by the way
3369  assert(connIt->delay > 0);
3370  if (connIt->delay > lastDelay) {
3371  managerRuntimeData.postDelayInfo[lNId * (glbNetworkConfig.maxDelay + 1) + connIt->delay - 1].delay_index_start = managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]];
3372  managerRuntimeData.postDelayInfo[lNId * (glbNetworkConfig.maxDelay + 1) + connIt->delay - 1].delay_length++;
3373  } else if (connIt->delay == lastDelay) {
3374  managerRuntimeData.postDelayInfo[lNId * (glbNetworkConfig.maxDelay + 1) + connIt->delay - 1].delay_length++;
3375  } else {
3376  KERNEL_ERROR("Post-synaptic delays not sorted correctly... pre_id=%d, delay[%d]=%d, delay[%d]=%d",
3377  lNId, managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]], connIt->delay, managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]] - 1, lastDelay);
3378  }
3379  lastDelay = connIt->delay;
3380 
3381  // update the corresponding pre synaptic id
3382  SynInfo preId = managerRuntimeData.preSynapticIds[pre_pos];
3383  assert(GET_CONN_NEURON_ID(preId) == connIt->nSrc + GLoffset[connIt->grpSrc]);
3384  //assert(GET_CONN_GRP_ID(preId) == it->grpSrc);
3385  managerRuntimeData.preSynapticIds[pre_pos] = SET_CONN_ID((connIt->nSrc + GLoffset[connIt->grpSrc]), managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]], (GLgrpId[connIt->grpSrc]));
3386  managerRuntimeData.wt[pre_pos] = connIt->initWt;
3387  managerRuntimeData.maxSynWt[pre_pos] = connIt->maxWt;
3388  managerRuntimeData.connIdsPreIdx[pre_pos] = connIt->connId;
3389 
3390  managerRuntimeData.Npost[connIt->nSrc + GLoffset[connIt->grpSrc]]++;
3391  parsedConnections++;
3392 
3393  // update the maximum number of and post-connections of a neuron in a group
3394  //if (managerRuntimeData.Npost[it->nSrc] > groupInfo[it->grpSrc].maxPostConn)
3395  // groupInfo[it->grpSrc].maxPostConn = managerRuntimeData.Npost[it->nSrc];
3396  }
3397  assert(parsedConnections == managerRuntimeData.Npost[lNId]);
3398  //printf("parsed post connections %d\n", parsedConnections);
3399  // note: elements in postConnectionList are deallocated automatically with postConnectionList
3400  /* for postDelayInfo debugging
3401  printf("%d ", lNId);
3402  for (int t = 0; t < maxDelay_ + 1; t ++) {
3403  printf("[%d,%d]",
3404  managerRuntimeData.postDelayInfo[lNId * (maxDelay_ + 1) + t].delay_index_start,
3405  managerRuntimeData.postDelayInfo[lNId * (maxDelay_ + 1) + t].delay_length);
3406  }
3407  printf("\n");
3408  */
3409  }
3410  }
3411  assert(connectionLists[netId].empty());
3412 
3413  //int p = managerRuntimeData.Npost[src];
3414 
3415  //assert(managerRuntimeData.Npost[src] >= 0);
3416  //assert(managerRuntimeData.Npre[dest] >= 0);
3417  //assert((src * maxNumPostSynGrp + p) / numN < maxNumPostSynGrp); // divide by numN to prevent INT overflow
3418 
3419  //unsigned int post_pos = managerRuntimeData.cumulativePost[src] + managerRuntimeData.Npost[src];
3420  //unsigned int pre_pos = managerRuntimeData.cumulativePre[dest] + managerRuntimeData.Npre[dest];
3421 
3422  //assert(post_pos < numPostSynNet);
3423  //assert(pre_pos < numPreSynNet);
3424 
3426  //managerRuntimeData.postSynapticIds[post_pos] = SET_CONN_ID(dest, managerRuntimeData.Npre[dest], destGrp);
3427  //tmp_SynapticDelay[post_pos] = dVal;
3428 
3429  //managerRuntimeData.preSynapticIds[pre_pos] = SET_CONN_ID(src, managerRuntimeData.Npost[src], srcGrp);
3430  //managerRuntimeData.wt[pre_pos] = synWt;
3431  //managerRuntimeData.maxSynWt[pre_pos] = maxWt;
3432  //managerRuntimeData.connIdsPreIdx[pre_pos] = connId;
3433 
3434  //bool synWtType = GET_FIXED_PLASTIC(connProp);
3435 
3436  //if (synWtType == SYN_PLASTIC) {
3437  // sim_with_fixedwts = false; // if network has any plastic synapses at all, this will be set to true
3438  // managerRuntimeData.Npre_plastic[dest]++;
3439  // // homeostasis
3440  // if (groupConfigs[0][destGrp].WithHomeostasis && groupConfigs[0][destGrp].homeoId ==-1)
3441  // groupConfigs[0][destGrp].homeoId = dest; // this neuron info will be printed
3442  //}
3443 
3444  //managerRuntimeData.Npre[dest] += 1;
3445  //managerRuntimeData.Npost[src] += 1;
3446 
3447  //groupInfo[srcGrp].numPostConn++;
3448  //groupInfo[destGrp].numPreConn++;
3449 
3451  //if (managerRuntimeData.Npost[src] > groupInfo[srcGrp].maxPostConn)
3452  // groupInfo[srcGrp].maxPostConn = managerRuntimeData.Npost[src];
3453  //if (managerRuntimeData.Npre[dest] > groupInfo[destGrp].maxPreConn)
3454  // groupInfo[destGrp].maxPreConn = managerRuntimeData.Npre[src];
3455 }
3456 
3457 void SNN::generateCompConnectionRuntime(int netId)
3458 {
3459  std::map<int, int> GLgrpId; // global grpId to local grpId offset
3460 
3461  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
3462  GLgrpId[grpIt->gGrpId] = grpIt->lGrpId;
3463  //printf("Global group id %i; Local group id %i\n", grpIt->gGrpId, grpIt->lGrpId);
3464  }
3465 
3466  //printf("The current netid is: %i\n", netId);
3467 
3468  for (std::list<compConnectConfig>::iterator connIt = localCompConnectLists[netId].begin(); connIt != localCompConnectLists[netId].end(); connIt++) {
3469  //printf("The size of localCompConnectLists is: %i\n", localCompConnectLists[netId].size());
3470  int grpLower = connIt->grpSrc;
3471  int grpUpper = connIt->grpDest;
3472 
3473  int i = groupConfigs[netId][GLgrpId[grpLower]].numCompNeighbors;
3474  if (i >= MAX_NUM_COMP_CONN) {
3475  KERNEL_ERROR("Group %s(%d) exceeds max number of allowed compartmental connections (%d).",
3476  groupConfigMap[grpLower].grpName.c_str(), grpLower, (int)MAX_NUM_COMP_CONN);
3477  exitSimulation(1);
3478  }
3479  groupConfigs[netId][GLgrpId[grpLower]].compNeighbors[i] = grpUpper;
3480  groupConfigs[netId][GLgrpId[grpLower]].compCoupling[i] = groupConfigs[netId][GLgrpId[grpUpper]].compCouplingDown; // get down-coupling from upper neighbor
3481  groupConfigs[netId][GLgrpId[grpLower]].numCompNeighbors++;
3482 
3483  int j = groupConfigs[netId][GLgrpId[grpUpper]].numCompNeighbors;
3484  if (j >= MAX_NUM_COMP_CONN) {
3485  KERNEL_ERROR("Group %s(%d) exceeds max number of allowed compartmental connections (%d).",
3486  groupConfigMap[grpUpper].grpName.c_str(), grpUpper, (int)MAX_NUM_COMP_CONN);
3487  exitSimulation(1);
3488  }
3489  groupConfigs[netId][GLgrpId[grpUpper]].compNeighbors[j] = grpLower;
3490  groupConfigs[netId][GLgrpId[grpUpper]].compCoupling[j] = groupConfigs[netId][GLgrpId[grpLower]].compCouplingUp; // get up-coupling from lower neighbor
3491  groupConfigs[netId][GLgrpId[grpUpper]].numCompNeighbors++;
3492 
3493  //printf("Group %i (local group %i) has %i compartmental neighbors!\n", grpUpper, GLgrpId[grpUpper], groupConfigs[netId][GLgrpId[grpUpper]].numCompNeighbors);
3494  }
3495 }
3496 
3497 
3498 void SNN::generatePoissonGroupRuntime(int netId, int lGrpId) {
3499  for(int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++)
3500  resetPoissonNeuron(netId, lGrpId, lNId);
3501 }
3502 
3503 
3504 void SNN::collectGlobalNetworkConfigC() {
3505  // scan all connect configs to find the maximum delay in the global network, update glbNetworkConfig.maxDelay
3506  for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
3507  if (connIt->second.maxDelay > glbNetworkConfig.maxDelay)
3508  glbNetworkConfig.maxDelay = connIt->second.maxDelay;
3509  }
3510  assert(connectConfigMap.size() > 0 || glbNetworkConfig.maxDelay != -1);
3511 
3512  // scan all group configs to find the number of (reg, pois, exc, inh) neuron in the global network
3513  for(int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
3514  if (IS_EXCITATORY_TYPE(groupConfigMap[gGrpId].type) && (groupConfigMap[gGrpId].type & POISSON_NEURON)) {
3515  glbNetworkConfig.numNExcPois += groupConfigMap[gGrpId].numN;
3516  } else if (IS_INHIBITORY_TYPE(groupConfigMap[gGrpId].type) && (groupConfigMap[gGrpId].type & POISSON_NEURON)) {
3517  glbNetworkConfig.numNInhPois += groupConfigMap[gGrpId].numN;
3518  } else if (IS_EXCITATORY_TYPE(groupConfigMap[gGrpId].type) && !(groupConfigMap[gGrpId].type & POISSON_NEURON)) {
3519  glbNetworkConfig.numNExcReg += groupConfigMap[gGrpId].numN;
3520  } else if (IS_INHIBITORY_TYPE(groupConfigMap[gGrpId].type) && !(groupConfigMap[gGrpId].type & POISSON_NEURON)) {
3521  glbNetworkConfig.numNInhReg += groupConfigMap[gGrpId].numN;
3522  }
3523 
3524  if (groupConfigMDMap[gGrpId].maxOutgoingDelay == 1)
3525  glbNetworkConfig.numN1msDelay += groupConfigMap[gGrpId].numN;
3526  else if (groupConfigMDMap[gGrpId].maxOutgoingDelay >= 2)
3527  glbNetworkConfig.numN2msDelay += groupConfigMap[gGrpId].numN;
3528  }
3529 
3530  glbNetworkConfig.numNReg = glbNetworkConfig.numNExcReg + glbNetworkConfig.numNInhReg;
3531  glbNetworkConfig.numNPois = glbNetworkConfig.numNExcPois + glbNetworkConfig.numNInhPois;
3532  glbNetworkConfig.numN = glbNetworkConfig.numNReg + glbNetworkConfig.numNPois;
3533 }
3534 
3535 
3536 void SNN::collectGlobalNetworkConfigP() {
3537  // print group and connection overview
3538  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
3539  if (!localConnectLists[netId].empty() || !externalConnectLists[netId].empty()) {
3540  for (std::list<ConnectConfig>::iterator connIt = localConnectLists[netId].begin(); connIt != localConnectLists[netId].end(); connIt++)
3541  glbNetworkConfig.numSynNet += connIt->numberOfConnections;
3542 
3543  for (std::list<ConnectConfig>::iterator connIt = externalConnectLists[netId].begin(); connIt != externalConnectLists[netId].end(); connIt++)
3544  glbNetworkConfig.numSynNet += connIt->numberOfConnections;
3545  }
3546  }
3547 }
3548 
3549 // after all the initalization. Its time to create the synaptic weights, weight change and also
3550 // time of firing these are the mostly costly arrays so dense packing is essential to minimize wastage of space
3551 void SNN::compileSNN() {
3552  KERNEL_DEBUG("Beginning compilation of the network....");
3553 
3554  // compile (update) group and connection configs according to their mutual information
3555  // update GroupConfig::MaxDelay GroupConfig::FixedInputWts
3556  // assign GroupConfig::StartN and GroupConfig::EndN
3557  // Note: MaxDelay, FixedInputWts, StartN, and EndN are invariant in single-GPU or multi-GPUs mode
3558  compileGroupConfig();
3559 
3560  compileConnectConfig(); // for future use
3561 
3562  // collect the global network config according to compiled gorup and connection configs
3563  // collect SNN::maxDelay_
3564  collectGlobalNetworkConfigC();
3565 
3566  // perform various consistency checks:
3567  // - numNeurons vs. sum of all neurons
3568  // - STDP set on a post-group with incoming plastic connections
3569  // - etc.
3570  verifyNetwork();
3571 
3572  // display the global network configuration
3573  KERNEL_INFO("\n");
3574  KERNEL_INFO("************************** Global Network Configuration *******************************");
3575  KERNEL_INFO("The number of neurons in the network (numN) = %d", glbNetworkConfig.numN);
3576  KERNEL_INFO("The number of regular neurons in the network (numNReg:numNExcReg:numNInhReg) = %d:%d:%d", glbNetworkConfig.numNReg, glbNetworkConfig.numNExcReg, glbNetworkConfig.numNInhReg);
3577  KERNEL_INFO("The number of poisson neurons in the network (numNPois:numNExcPois:numInhPois) = %d:%d:%d", glbNetworkConfig.numNPois, glbNetworkConfig.numNExcPois, glbNetworkConfig.numNInhPois);
3578  KERNEL_INFO("The maximum axonal delay in the network (maxDelay) = %d", glbNetworkConfig.maxDelay);
3579 
3580  //ensure that we dont compile the network again
3581  snnState = COMPILED_SNN;
3582 }
3583 
// Placeholder for per-connection compilation steps; currently a no-op.
// Kept so that compileSNN() has a stable call site when connection-config
// compilation is eventually implemented.
void SNN::compileConnectConfig() {
	// for future use
}
3587 
3588 void SNN::compileGroupConfig() {
3589  int grpSrc;
3590  bool synWtType;
3591 
3592  // find the maximum delay for each group according to incoming connection
3593  for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
3594  // check if the current connection's delay meaning grpSrc's delay
3595  // is greater than the MaxDelay for grpSrc. We find the maximum
3596  // delay for the grpSrc by this scheme.
3597  grpSrc = connIt->second.grpSrc;
3598  if (connIt->second.maxDelay > groupConfigMDMap[grpSrc].maxOutgoingDelay)
3599  groupConfigMDMap[grpSrc].maxOutgoingDelay = connIt->second.maxDelay;
3600 
3601  // given group has plastic connection, and we need to apply STDP rule...
3602  synWtType = GET_FIXED_PLASTIC(connIt->second.connProp);
3603  if (synWtType == SYN_PLASTIC) {
3604  groupConfigMDMap[connIt->second.grpDest].fixedInputWts = false;
3605  }
3606  }
3607 
3608  // assigned global neruon ids to each group in the order...
3609  // !!!!!!! IMPORTANT : NEURON ORGANIZATION/ARRANGEMENT MAP !!!!!!!!!!
3610  // <--- Excitatory --> | <-------- Inhibitory REGION ----------> | <-- Excitatory -->
3611  // Excitatory-Regular | Inhibitory-Regular | Inhibitory-Poisson | Excitatory-Poisson
3612  int assignedGroup = 0;
3613  int availableNeuronId = 0;
3614  for(int order = 0; order < 4; order++) {
3615  for(int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
3616  if (IS_EXCITATORY_TYPE(groupConfigMap[gGrpId].type) && (groupConfigMap[gGrpId].type & POISSON_NEURON) && order == 3) {
3617  availableNeuronId = assignGroup(gGrpId, availableNeuronId);
3618  assignedGroup++;
3619  } else if (IS_INHIBITORY_TYPE(groupConfigMap[gGrpId].type) && (groupConfigMap[gGrpId].type & POISSON_NEURON) && order == 2) {
3620  availableNeuronId = assignGroup(gGrpId, availableNeuronId);
3621  assignedGroup++;
3622  } else if (IS_EXCITATORY_TYPE(groupConfigMap[gGrpId].type) && !(groupConfigMap[gGrpId].type & POISSON_NEURON) && order == 0) {
3623  availableNeuronId = assignGroup(gGrpId, availableNeuronId);
3624  assignedGroup++;
3625  } else if (IS_INHIBITORY_TYPE(groupConfigMap[gGrpId].type) && !(groupConfigMap[gGrpId].type & POISSON_NEURON) && order == 1) {
3626  availableNeuronId = assignGroup(gGrpId, availableNeuronId);
3627  assignedGroup++;
3628  }
3629  }
3630  }
3631  //assert(availableNeuronId == numN);
3632  assert(assignedGroup == numGroups);
3633 }
3634 
3635 void SNN::connectNetwork() {
3636  // this parse generates local connections
3637  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
3638  for (std::list<ConnectConfig>::iterator connIt = localConnectLists[netId].begin(); connIt != localConnectLists[netId].end(); connIt++) {
3639  switch(connIt->type) {
3640  case CONN_RANDOM:
3641  connectRandom(netId, connIt, false);
3642  break;
3643  case CONN_FULL:
3644  connectFull(netId, connIt, false);
3645  break;
3646  case CONN_FULL_NO_DIRECT:
3647  connectFull(netId, connIt, false);
3648  break;
3649  case CONN_ONE_TO_ONE:
3650  connectOneToOne(netId, connIt, false);
3651  break;
3652  case CONN_GAUSSIAN:
3653  connectGaussian(netId, connIt, false);
3654  break;
3655  case CONN_USER_DEFINED:
3656  connectUserDefined(netId, connIt, false);
3657  break;
3658  default:
3659  KERNEL_ERROR("Invalid connection type( should be 'random', 'full', 'full-no-direct', or 'one-to-one')");
3660  exitSimulation(-1);
3661  }
3662  }
3663  }
3664 
3665  // this parse generates external connections
3666  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
3667  for (std::list<ConnectConfig>::iterator connIt = externalConnectLists[netId].begin(); connIt != externalConnectLists[netId].end(); connIt++) {
3668  switch(connIt->type) {
3669  case CONN_RANDOM:
3670  connectRandom(netId, connIt, true);
3671  break;
3672  case CONN_FULL:
3673  connectFull(netId, connIt, true);
3674  break;
3675  case CONN_FULL_NO_DIRECT:
3676  connectFull(netId, connIt, true);
3677  break;
3678  case CONN_ONE_TO_ONE:
3679  connectOneToOne(netId, connIt, true);
3680  break;
3681  case CONN_GAUSSIAN:
3682  connectGaussian(netId, connIt, true);
3683  break;
3684  case CONN_USER_DEFINED:
3685  connectUserDefined(netId, connIt, true);
3686  break;
3687  default:
3688  KERNEL_ERROR("Invalid connection type( should be 'random', 'full', 'full-no-direct', or 'one-to-one')");
3689  exitSimulation(-1);
3690  }
3691  }
3692  }
3693 }
3694 
3696 inline void SNN::connectNeurons(int netId, int _grpSrc, int _grpDest, int _nSrc, int _nDest, short int _connId, int externalNetId) {
3697  //assert(destN <= CONN_SYN_NEURON_MASK); // total number of neurons is less than 1 million within a GPU
3698  ConnectionInfo connInfo;
3699  connInfo.grpSrc = _grpSrc;
3700  connInfo.grpDest = _grpDest;
3701  connInfo.nSrc = _nSrc;
3702  connInfo.nDest = _nDest;
3703  connInfo.srcGLoffset = 0;
3704  connInfo.connId = _connId;
3705  connInfo.preSynId = -1;
3706  connInfo.initWt = 0.0f;
3707  connInfo.maxWt = 0.0f;
3708  connInfo.delay = 0;
3709 
3710  // generate the delay vaule
3711  connInfo.delay = connectConfigMap[_connId].minDelay + rand() % (connectConfigMap[_connId].maxDelay - connectConfigMap[_connId].minDelay + 1);
3712  assert((connInfo.delay >= connectConfigMap[_connId].minDelay) && (connInfo.delay <= connectConfigMap[_connId].maxDelay));
3713  // generate the max weight and initial weight
3714  //float initWt = generateWeight(connectConfigMap[it->connId].connProp, connectConfigMap[it->connId].initWt, connectConfigMap[it->connId].maxWt, it->nSrc, it->grpSrc);
3715  float initWt = connectConfigMap[_connId].initWt;
3716  float maxWt = connectConfigMap[_connId].maxWt;
3717  // adjust sign of weight based on pre-group (negative if pre is inhibitory)
3718  // this access is fine, isExcitatoryGroup() use global grpId
3719  connInfo.maxWt = isExcitatoryGroup(_grpSrc) ? fabs(maxWt) : -1.0 * fabs(maxWt);
3720  connInfo.initWt = isExcitatoryGroup(_grpSrc) ? fabs(initWt) : -1.0 * fabs(initWt);
3721 
3722  connectionLists[netId].push_back(connInfo);
3723 
3724  // If the connection is external, copy the connection info to the external network
3725  if (externalNetId >= 0)
3726  connectionLists[externalNetId].push_back(connInfo);
3727 }
3728 
3730 inline void SNN::connectNeurons(int netId, int _grpSrc, int _grpDest, int _nSrc, int _nDest, short int _connId, float initWt, float maxWt, uint8_t delay, int externalNetId) {
3731  //assert(destN <= CONN_SYN_NEURON_MASK); // total number of neurons is less than 1 million within a GPU
3732  ConnectionInfo connInfo;
3733  connInfo.grpSrc = _grpSrc;
3734  connInfo.grpDest = _grpDest;
3735  connInfo.nSrc = _nSrc;
3736  connInfo.nDest = _nDest;
3737  connInfo.srcGLoffset = 0;
3738  connInfo.connId = _connId;
3739  connInfo.preSynId = -1;
3740  // adjust the sign of the weight based on inh/exc connection
3741  connInfo.initWt = isExcitatoryGroup(_grpSrc) ? fabs(initWt) : -1.0*fabs(initWt);
3742  connInfo.maxWt = isExcitatoryGroup(_grpSrc) ? fabs(maxWt) : -1.0*fabs(maxWt);
3743  connInfo.delay = delay;
3744 
3745  connectionLists[netId].push_back(connInfo);
3746 
3747  // If the connection is external, copy the connection info to the external network
3748  if (externalNetId >= 0)
3749  connectionLists[externalNetId].push_back(connInfo);
3750 }
3751 
3752 // make 'C' full connections from grpSrc to grpDest
3753 void SNN::connectFull(int netId, std::list<ConnectConfig>::iterator connIt, bool isExternal) {
3754  int grpSrc = connIt->grpSrc;
3755  int grpDest = connIt->grpDest;
3756  bool noDirect = (connIt->type == CONN_FULL_NO_DIRECT);
3757  int externalNetId = -1;
3758 
3759  if (isExternal) {
3760  externalNetId = groupConfigMDMap[grpDest].netId;
3761  assert(netId != externalNetId);
3762  }
3763 
3764  int gPreStart = groupConfigMDMap[grpSrc].gStartN;
3765  for(int gPreN = groupConfigMDMap[grpSrc].gStartN; gPreN <= groupConfigMDMap[grpSrc].gEndN; gPreN++) {
3766  Point3D locPre = getNeuronLocation3D(grpSrc, gPreN - gPreStart); // 3D coordinates of i
3767  int gPostStart = groupConfigMDMap[grpDest].gStartN;
3768  for(int gPostN = groupConfigMDMap[grpDest].gStartN; gPostN <= groupConfigMDMap[grpDest].gEndN; gPostN++) { // j: the temp neuron id
3769  // if flag is set, don't connect direct connections
3770  if(noDirect && gPreN == gPostN)
3771  continue;
3772 
3773  // check whether pre-neuron location is in RF of post-neuron
3774  Point3D locPost = getNeuronLocation3D(grpDest, gPostN - gPostStart); // 3D coordinates of j
3775  if (!isPoint3DinRF(connIt->connRadius, locPre, locPost))
3776  continue;
3777 
3778  connectNeurons(netId, grpSrc, grpDest, gPreN, gPostN, connIt->connId, externalNetId);
3779  connIt->numberOfConnections++;
3780  }
3781  }
3782 
3783  std::list<GroupConfigMD>::iterator grpIt;
3784  GroupConfigMD targetGrp;
3785 
3786  // update numPostSynapses and numPreSynapses of groups in the local network
3787  targetGrp.gGrpId = grpSrc; // the other fields does not matter
3788  grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
3789  assert(grpIt != groupPartitionLists[netId].end());
3790  grpIt->numPostSynapses += connIt->numberOfConnections;
3791 
3792  targetGrp.gGrpId = grpDest; // the other fields does not matter
3793  grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
3794  assert(grpIt != groupPartitionLists[netId].end());
3795  grpIt->numPreSynapses += connIt->numberOfConnections;
3796 
3797  // also update numPostSynapses and numPreSynapses of groups in the external network if the connection is external
3798  if (isExternal) {
3799  targetGrp.gGrpId = grpSrc; // the other fields does not matter
3800  grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
3801  assert(grpIt != groupPartitionLists[externalNetId].end());
3802  grpIt->numPostSynapses += connIt->numberOfConnections;
3803 
3804  targetGrp.gGrpId = grpDest; // the other fields does not matter
3805  grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
3806  assert(grpIt != groupPartitionLists[externalNetId].end());
3807  grpIt->numPreSynapses += connIt->numberOfConnections;
3808  }
3809 }
3810 
/*!
 * \brief Generates Gaussian-weighted connections from grpSrc to grpDest.
 *
 * For each (pre, post) pair within the connection's receptive field, the weight
 * is scaled by a Gaussian of the normalized RF distance and the pair is connected
 * with probability connProbability. The delay is drawn uniformly from
 * [minDelay, maxDelay]. Afterwards, the per-group pre/post synapse counters are
 * updated in the local partition and, for external connections, in the
 * destination partition as well.
 *
 * NOTE(review): connection generation consumes drand48()/rand() in a fixed
 * sequence, so any reordering of this loop changes the realized network.
 */
void SNN::connectGaussian(int netId, std::list<ConnectConfig>::iterator connIt, bool isExternal) {
	// in case pre and post have different Grid3D sizes: scale pre to the grid size of post
	int grpSrc = connIt->grpSrc;
	int grpDest = connIt->grpDest;
	Grid3D grid_i = getGroupGrid3D(grpSrc);
	Grid3D grid_j = getGroupGrid3D(grpDest);
	Point3D scalePre = Point3D(grid_j.numX, grid_j.numY, grid_j.numZ) / Point3D(grid_i.numX, grid_i.numY, grid_i.numZ);
	int externalNetId = -1;

	if (isExternal) {
		// an external connection targets the partition that owns the destination group
		externalNetId = groupConfigMDMap[grpDest].netId;
		assert(netId != externalNetId);
	}

	for(int i = groupConfigMDMap[grpSrc].gStartN; i <= groupConfigMDMap[grpSrc].gEndN; i++) {
		Point3D loc_i = getNeuronLocation3D(i)*scalePre; // i: adjusted 3D coordinates

		for(int j = groupConfigMDMap[grpDest].gStartN; j <= groupConfigMDMap[grpDest].gEndN; j++) { // j: the temp neuron id
			// check whether pre-neuron location is in RF of post-neuron
			Point3D loc_j = getNeuronLocation3D(j); // 3D coordinates of j

			// make sure point is in RF; getRFDist3D() returns a value in [0,1] for valid points
			double rfDist = getRFDist3D(connIt->connRadius,loc_i,loc_j);
			if (rfDist < 0.0 || rfDist > 1.0)
				continue;

			// if rfDist is valid, it returns a number between 0 and 1
			// we want these numbers to fit to Gaussian weights, so that rfDist=0 corresponds to max Gaussian weight
			// and rfDist=1 corresponds to 0.1 times max Gaussian weight
			// so we're looking at gauss = exp(-a*rfDist), where a such that exp(-a)=0.1
			// solving for a, we find that a = 2.3026
			double gauss = exp(-2.3026*rfDist);
			if (gauss < 0.1)
				continue;

			// realize the connection with the configured probability
			if (drand48() < connIt->connProbability) {
				float initWt = gauss * connIt->initWt; // scale weight according to gauss distance
				float maxWt = connIt->maxWt;
				// draw the delay uniformly from [minDelay, maxDelay]
				uint8_t delay = connIt->minDelay + rand() % (connIt->maxDelay - connIt->minDelay + 1);
				assert((delay >= connIt->minDelay) && (delay <= connIt->maxDelay));

				connectNeurons(netId, grpSrc, grpDest, i, j, connIt->connId, initWt, maxWt, delay, externalNetId);
				connIt->numberOfConnections++;
			}
		}
	}

	std::list<GroupConfigMD>::iterator grpIt;
	GroupConfigMD targetGrp;

	// update numPostSynapses and numPreSynapses of groups in the local network
	targetGrp.gGrpId = grpSrc; // the other fields does not matter
	grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
	assert(grpIt != groupPartitionLists[netId].end());
	grpIt->numPostSynapses += connIt->numberOfConnections;

	targetGrp.gGrpId = grpDest; // the other fields does not matter
	grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
	assert(grpIt != groupPartitionLists[netId].end());
	grpIt->numPreSynapses += connIt->numberOfConnections;

	// also update numPostSynapses and numPreSynapses of groups in the external network if the connection is external
	if (isExternal) {
		targetGrp.gGrpId = grpSrc; // the other fields does not matter
		grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
		assert(grpIt != groupPartitionLists[externalNetId].end());
		grpIt->numPostSynapses += connIt->numberOfConnections;

		targetGrp.gGrpId = grpDest; // the other fields does not matter
		grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
		assert(grpIt != groupPartitionLists[externalNetId].end());
		grpIt->numPreSynapses += connIt->numberOfConnections;
	}
}
3885 
3886 void SNN::connectOneToOne(int netId, std::list<ConnectConfig>::iterator connIt, bool isExternal) {
3887  int grpSrc = connIt->grpSrc;
3888  int grpDest = connIt->grpDest;
3889  int externalNetId = -1;
3890 
3891  if (isExternal) {
3892  externalNetId = groupConfigMDMap[grpDest].netId;
3893  assert(netId != externalNetId);
3894  }
3895 
3896  assert( groupConfigMap[grpDest].numN == groupConfigMap[grpSrc].numN);
3897 
3898  // NOTE: RadiusRF does not make a difference here: ignore
3899  for(int gPreN = groupConfigMDMap[grpSrc].gStartN, gPostN = groupConfigMDMap[grpDest].gStartN; gPreN <= groupConfigMDMap[grpSrc].gEndN; gPreN++, gPostN++) {
3900  connectNeurons(netId, grpSrc, grpDest, gPreN, gPostN, connIt->connId, externalNetId);
3901  connIt->numberOfConnections++;
3902  }
3903 
3904  std::list<GroupConfigMD>::iterator grpIt;
3905  GroupConfigMD targetGrp;
3906 
3907  // update numPostSynapses and numPreSynapses of groups in the local network
3908  targetGrp.gGrpId = grpSrc; // the other fields does not matter
3909  grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
3910  assert(grpIt != groupPartitionLists[netId].end());
3911  grpIt->numPostSynapses += connIt->numberOfConnections;
3912 
3913  targetGrp.gGrpId = grpDest; // the other fields does not matter
3914  grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
3915  assert(grpIt != groupPartitionLists[netId].end());
3916  grpIt->numPreSynapses += connIt->numberOfConnections;
3917 
3918  // also update numPostSynapses and numPreSynapses of groups in the external network if the connection is external
3919  if (isExternal) {
3920  targetGrp.gGrpId = grpSrc; // the other fields does not matter
3921  grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
3922  assert(grpIt != groupPartitionLists[externalNetId].end());
3923  grpIt->numPostSynapses += connIt->numberOfConnections;
3924 
3925  targetGrp.gGrpId = grpDest; // the other fields does not matter
3926  grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
3927  assert(grpIt != groupPartitionLists[externalNetId].end());
3928  grpIt->numPreSynapses += connIt->numberOfConnections;
3929  }
3930 }
3931 
3932 // make 'C' random connections from grpSrc to grpDest
3933 void SNN::connectRandom(int netId, std::list<ConnectConfig>::iterator connIt, bool isExternal) {
3934  int grpSrc = connIt->grpSrc;
3935  int grpDest = connIt->grpDest;
3936  int externalNetId = -1;
3937 
3938  if (isExternal) {
3939  externalNetId = groupConfigMDMap[grpDest].netId;
3940  assert(netId != externalNetId);
3941  }
3942 
3943  int gPreStart = groupConfigMDMap[grpSrc].gStartN;
3944  for(int gPreN = groupConfigMDMap[grpSrc].gStartN; gPreN <= groupConfigMDMap[grpSrc].gEndN; gPreN++) {
3945  Point3D locPre = getNeuronLocation3D(grpSrc, gPreN - gPreStart); // 3D coordinates of i
3946  int gPostStart = groupConfigMDMap[grpDest].gStartN;
3947  for(int gPostN = groupConfigMDMap[grpDest].gStartN; gPostN <= groupConfigMDMap[grpDest].gEndN; gPostN++) {
3948  // check whether pre-neuron location is in RF of post-neuron
3949  Point3D locPost = getNeuronLocation3D(grpDest, gPostN - gPostStart); // 3D coordinates of j
3950  if (!isPoint3DinRF(connIt->connRadius, locPre, locPost))
3951  continue;
3952 
3953  if (drand48() < connIt->connProbability) {
3954  connectNeurons(netId, grpSrc, grpDest, gPreN, gPostN, connIt->connId, externalNetId);
3955  connIt->numberOfConnections++;
3956  }
3957  }
3958  }
3959 
3960  std::list<GroupConfigMD>::iterator grpIt;
3961  GroupConfigMD targetGrp;
3962 
3963  // update numPostSynapses and numPreSynapses of groups in the local network
3964  targetGrp.gGrpId = grpSrc; // the other fields does not matter
3965  grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
3966  assert(grpIt != groupPartitionLists[netId].end());
3967  grpIt->numPostSynapses += connIt->numberOfConnections;
3968 
3969  targetGrp.gGrpId = grpDest; // the other fields does not matter
3970  grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
3971  assert(grpIt != groupPartitionLists[netId].end());
3972  grpIt->numPreSynapses += connIt->numberOfConnections;
3973 
3974  // also update numPostSynapses and numPreSynapses of groups in the external network if the connection is external
3975  if (isExternal) {
3976  targetGrp.gGrpId = grpSrc; // the other fields does not matter
3977  grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
3978  assert(grpIt != groupPartitionLists[externalNetId].end());
3979  grpIt->numPostSynapses += connIt->numberOfConnections;
3980 
3981  targetGrp.gGrpId = grpDest; // the other fields does not matter
3982  grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
3983  assert(grpIt != groupPartitionLists[externalNetId].end());
3984  grpIt->numPreSynapses += connIt->numberOfConnections;
3985  }
3986 }
3987 
3988 // FIXME: rewrite user-define call-back function
3989 // user-defined functions called here...
3990 // This is where we define our user-defined call-back function. -- KDC
3991 void SNN::connectUserDefined(int netId, std::list<ConnectConfig>::iterator connIt, bool isExternal) {
3992  int grpSrc = connIt->grpSrc;
3993  int grpDest = connIt->grpDest;
3994  int externalNetId = -1;
3995 
3996  if (isExternal) {
3997  externalNetId = groupConfigMDMap[grpDest].netId;
3998  assert(netId != externalNetId);
3999  }
4000 
4001  connIt->maxDelay = 0;
4002  int preStartN = groupConfigMDMap[grpSrc].gStartN;
4003  int postStartN = groupConfigMDMap[grpDest].gStartN;
4004  for (int pre_nid = groupConfigMDMap[grpSrc].gStartN; pre_nid <= groupConfigMDMap[grpSrc].gEndN; pre_nid++) {
4005  //Point3D loc_pre = getNeuronLocation3D(pre_nid); // 3D coordinates of i
4006  for (int post_nid = groupConfigMDMap[grpDest].gStartN; post_nid <= groupConfigMDMap[grpDest].gEndN; post_nid++) {
4007  float weight, maxWt, delay;
4008  bool connected;
4009 
4010  connIt->conn->connect(this, grpSrc, pre_nid - preStartN, grpDest, post_nid - postStartN, weight, maxWt, delay, connected);
4011  if (connected) {
4012  assert(delay >= 1);
4013  assert(delay <= MAX_SYN_DELAY);
4014  assert(abs(weight) <= abs(maxWt));
4015 
4016  if (GET_FIXED_PLASTIC(connIt->connProp) == SYN_FIXED)
4017  maxWt = weight;
4018 
4019  if (fabs(maxWt) > connIt->maxWt)
4020  connIt->maxWt = fabs(maxWt);
4021 
4022  if (delay > connIt->maxDelay)
4023  connIt->maxDelay = delay;
4024 
4025  connectNeurons(netId, grpSrc, grpDest, pre_nid, post_nid, connIt->connId, weight, maxWt, delay, externalNetId);
4026  connIt->numberOfConnections++;
4027  }
4028  }
4029  }
4030 
4031  std::list<GroupConfigMD>::iterator grpIt;
4032  GroupConfigMD targetGrp;
4033 
4034  // update numPostSynapses and numPreSynapses of groups in the local network
4035  targetGrp.gGrpId = grpSrc; // the other fields does not matter
4036  grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
4037  assert(grpIt != groupPartitionLists[netId].end());
4038  grpIt->numPostSynapses += connIt->numberOfConnections;
4039 
4040  targetGrp.gGrpId = grpDest; // the other fields does not matter
4041  grpIt = std::find(groupPartitionLists[netId].begin(), groupPartitionLists[netId].end(), targetGrp);
4042  assert(grpIt != groupPartitionLists[netId].end());
4043  grpIt->numPreSynapses += connIt->numberOfConnections;
4044 
4045  // also update numPostSynapses and numPreSynapses of groups in the external network if the connection is external
4046  if (isExternal) {
4047  targetGrp.gGrpId = grpSrc; // the other fields does not matter
4048  grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
4049  assert(grpIt != groupPartitionLists[externalNetId].end());
4050  grpIt->numPostSynapses += connIt->numberOfConnections;
4051 
4052  targetGrp.gGrpId = grpDest; // the other fields does not matter
4053  grpIt = std::find(groupPartitionLists[externalNetId].begin(), groupPartitionLists[externalNetId].end(), targetGrp);
4054  assert(grpIt != groupPartitionLists[externalNetId].end());
4055  grpIt->numPreSynapses += connIt->numberOfConnections;
4056  }
4057 }
4058 
4060 //void SNN::connectFull(short int connId) {
4061 // int grpSrc = connectConfigMap[connId].grpSrc;
4062 // int grpDest = connectConfigMap[connId].grpDest;
4063 // bool noDirect = (connectConfigMap[connId].type == CONN_FULL_NO_DIRECT);
4064 //
4065 // // rebuild struct for easier handling
4066 // RadiusRF radius(connectConfigMap[connId].radX, connectConfigMap[connId].radY, connectConfigMap[connId].radZ);
4067 //
4068 // for(int i = groupConfigMap[grpSrc].StartN; i <= groupConfigMap[grpSrc].EndN; i++) {
4069 // Point3D loc_i = getNeuronLocation3D(i); // 3D coordinates of i
4070 // for(int j = groupConfigMap[grpDest].StartN; j <= groupConfigMap[grpDest].EndN; j++) { // j: the temp neuron id
4071 // // if flag is set, don't connect direct connections
4072 // if((noDirect) && (i - groupConfigMap[grpSrc].StartN) == (j - groupConfigMap[grpDest].StartN))
4073 // continue;
4074 //
4075 // // check whether pre-neuron location is in RF of post-neuron
4076 // Point3D loc_j = getNeuronLocation3D(j); // 3D coordinates of j
4077 // if (!isPoint3DinRF(radius, loc_i, loc_j))
4078 // continue;
4079 //
4080 // //uint8_t dVal = info->minDelay + (int)(0.5 + (drand48() * (info->maxDelay - info->minDelay)));
4081 // uint8_t dVal = connectConfigMap[connId].minDelay + rand() % (connectConfigMap[connId].maxDelay - connectConfigMap[connId].minDelay + 1);
4082 // assert((dVal >= connectConfigMap[connId].minDelay) && (dVal <= connectConfigMap[connId].maxDelay));
4083 // float synWt = generateWeight(connectConfigMap[connId].connProp, connectConfigMap[connId].initWt, connectConfigMap[connId].maxWt, i, grpSrc);
4084 //
4085 // setConnection(grpSrc, grpDest, i, j, synWt, connectConfigMap[connId].maxWt, dVal, connectConfigMap[connId].connProp, connId);// info->connId);
4086 // connectConfigMap[connId].numberOfConnections++;
4087 // }
4088 // }
4089 //
4090 // groupInfo[grpSrc].sumPostConn += connectConfigMap[connId].numberOfConnections;
4091 // groupInfo[grpDest].sumPreConn += connectConfigMap[connId].numberOfConnections;
4092 //}
4093 
4094 //void SNN::connectGaussian(short int connId) {
4095 // // rebuild struct for easier handling
4096 // // adjust with sqrt(2) in order to make the Gaussian kernel depend on 2*sigma^2
4097 // RadiusRF radius(connectConfigMap[connId].radX, connectConfigMap[connId].radY, connectConfigMap[connId].radZ);
4098 //
4099 // // in case pre and post have different Grid3D sizes: scale pre to the grid size of post
4100 // int grpSrc = connectConfigMap[connId].grpSrc;
4101 // int grpDest = connectConfigMap[connId].grpDest;
4102 // Grid3D grid_i = getGroupGrid3D(grpSrc);
4103 // Grid3D grid_j = getGroupGrid3D(grpDest);
4104 // Point3D scalePre = Point3D(grid_j.numX, grid_j.numY, grid_j.numZ) / Point3D(grid_i.numX, grid_i.numY, grid_i.numZ);
4105 //
4106 // for(int i = groupConfigMap[grpSrc].StartN; i <= groupConfigMap[grpSrc].EndN; i++) {
4107 // Point3D loc_i = getNeuronLocation3D(i)*scalePre; // i: adjusted 3D coordinates
4108 //
4109 // for(int j = groupConfigMap[grpDest].StartN; j <= groupConfigMap[grpDest].EndN; j++) { // j: the temp neuron id
4110 // // check whether pre-neuron location is in RF of post-neuron
4111 // Point3D loc_j = getNeuronLocation3D(j); // 3D coordinates of j
4112 //
4113 // // make sure point is in RF
4114 // double rfDist = getRFDist3D(radius,loc_i,loc_j);
4115 // if (rfDist < 0.0 || rfDist > 1.0)
4116 // continue;
4117 //
4118 // // if rfDist is valid, it returns a number between 0 and 1
// // we want these numbers to fit to Gaussian weights, so that rfDist=0 corresponds to max Gaussian weight
4120 // // and rfDist=1 corresponds to 0.1 times max Gaussian weight
4121 // // so we're looking at gauss = exp(-a*rfDist), where a such that exp(-a)=0.1
4122 // // solving for a, we find that a = 2.3026
4123 // double gauss = exp(-2.3026*rfDist);
4124 // if (gauss < 0.1)
4125 // continue;
4126 //
4127 // if (drand48() < connectConfigMap[connId].p) {
4128 // uint8_t dVal = connectConfigMap[connId].minDelay + rand() % (connectConfigMap[connId].maxDelay - connectConfigMap[connId].minDelay + 1);
4129 // assert((dVal >= connectConfigMap[connId].minDelay) && (dVal <= connectConfigMap[connId].maxDelay));
4130 // float synWt = gauss * connectConfigMap[connId].initWt; // scale weight according to gauss distance
4131 // setConnection(grpSrc, grpDest, i, j, synWt, connectConfigMap[connId].maxWt, dVal, connectConfigMap[connId].connProp, connId);//info->connId);
4132 // connectConfigMap[connId].numberOfConnections++;
4133 // }
4134 // }
4135 // }
4136 //
4137 // groupInfo[grpSrc].sumPostConn += connectConfigMap[connId].numberOfConnections;
4138 // groupInfo[grpDest].sumPreConn += connectConfigMap[connId].numberOfConnections;
4139 //}
4140 //
4141 //void SNN::connectOneToOne(short int connId) {
4142 // int grpSrc = connectConfigMap[connId].grpSrc;
4143 // int grpDest = connectConfigMap[connId].grpDest;
4144 // assert( groupConfigMap[grpDest].SizeN == groupConfigMap[grpSrc].SizeN );
4145 //
4146 // // NOTE: RadiusRF does not make a difference here: ignore
4147 // for(int nid=groupConfigMap[grpSrc].StartN,j=groupConfigMap[grpDest].StartN; nid<=groupConfigMap[grpSrc].EndN; nid++, j++) {
4148 // uint8_t dVal = connectConfigMap[connId].minDelay + rand() % (connectConfigMap[connId].maxDelay - connectConfigMap[connId].minDelay + 1);
4149 // assert((dVal >= connectConfigMap[connId].minDelay) && (dVal <= connectConfigMap[connId].maxDelay));
4150 // float synWt = generateWeight(connectConfigMap[connId].connProp, connectConfigMap[connId].initWt, connectConfigMap[connId].maxWt, nid, grpSrc);
4151 // setConnection(grpSrc, grpDest, nid, j, synWt, connectConfigMap[connId].maxWt, dVal, connectConfigMap[connId].connProp, connId);//info->connId);
4152 // connectConfigMap[connId].numberOfConnections++;
4153 // }
4154 //
4155 // groupInfo[grpSrc].sumPostConn += connectConfigMap[connId].numberOfConnections;
4156 // groupInfo[grpDest].sumPreConn += connectConfigMap[connId].numberOfConnections;
4157 //}
4158 //
4160 //void SNN::connectRandom(short int connId) {
4161 // int grpSrc = connectConfigMap[connId].grpSrc;
4162 // int grpDest = connectConfigMap[connId].grpDest;
4163 //
4164 // // rebuild struct for easier handling
4165 // RadiusRF radius(connectConfigMap[connId].radX, connectConfigMap[connId].radY, connectConfigMap[connId].radZ);
4166 //
4167 // for(int pre_nid = groupConfigMap[grpSrc].StartN; pre_nid <= groupConfigMap[grpSrc].EndN; pre_nid++) {
4168 // Point3D loc_pre = getNeuronLocation3D(pre_nid); // 3D coordinates of i
4169 // for(int post_nid = groupConfigMap[grpDest].StartN; post_nid <= groupConfigMap[grpDest].EndN; post_nid++) {
4170 // // check whether pre-neuron location is in RF of post-neuron
4171 // Point3D loc_post = getNeuronLocation3D(post_nid); // 3D coordinates of j
4172 // if (!isPoint3DinRF(radius, loc_pre, loc_post))
4173 // continue;
4174 //
4175 // if (drand48() < connectConfigMap[connId].p) {
4176 // //uint8_t dVal = info->minDelay + (int)(0.5+(drand48()*(info->maxDelay-info->minDelay)));
4177 // uint8_t dVal = connectConfigMap[connId].minDelay + rand() % (connectConfigMap[connId].maxDelay - connectConfigMap[connId].minDelay + 1);
4178 // assert((dVal >= connectConfigMap[connId].minDelay) && (dVal <= connectConfigMap[connId].maxDelay));
4179 // float synWt = generateWeight(connectConfigMap[connId].connProp, connectConfigMap[connId].initWt, connectConfigMap[connId].maxWt, pre_nid, grpSrc);
4180 // setConnection(grpSrc, grpDest, pre_nid, post_nid, synWt, connectConfigMap[connId].maxWt, dVal, connectConfigMap[connId].connProp, connId); //info->connId);
4181 // connectConfigMap[connId].numberOfConnections++;
4182 // }
4183 // }
4184 // }
4185 //
4186 // groupInfo[grpSrc].sumPostConn += connectConfigMap[connId].numberOfConnections;
4187 // groupInfo[grpDest].sumPreConn += connectConfigMap[connId].numberOfConnections;
4188 //}
4189 //
4192 //void SNN::connectUserDefined(short int connId) {
4193 // int grpSrc = connectConfigMap[connId].grpSrc;
4194 // int grpDest = connectConfigMap[connId].grpDest;
4195 // connectConfigMap[connId].maxDelay = 0;
4196 // for(int nid=groupConfigMap[grpSrc].StartN; nid<=groupConfigMap[grpSrc].EndN; nid++) {
4197 // for(int nid2=groupConfigMap[grpDest].StartN; nid2 <= groupConfigMap[grpDest].EndN; nid2++) {
4198 // int srcId = nid - groupConfigMap[grpSrc].StartN;
4199 // int destId = nid2 - groupConfigMap[grpDest].StartN;
4200 // float weight, maxWt, delay;
4201 // bool connected;
4202 //
4203 // connectConfigMap[connId].conn->connect(this, grpSrc, srcId, grpDest, destId, weight, maxWt, delay, connected);
4204 // if(connected) {
4205 // if (GET_FIXED_PLASTIC(connectConfigMap[connId].connProp) == SYN_FIXED)
4206 // maxWt = weight;
4207 //
4208 // connectConfigMap[connId].maxWt = maxWt;
4209 //
4210 // assert(delay >= 1);
4211 // assert(delay <= MAX_SYN_DELAY);
4212 // assert(abs(weight) <= abs(maxWt));
4213 //
4214 // // adjust the sign of the weight based on inh/exc connection
4215 // weight = isExcitatoryGroup(grpSrc) ? fabs(weight) : -1.0*fabs(weight);
4216 // maxWt = isExcitatoryGroup(grpSrc) ? fabs(maxWt) : -1.0*fabs(maxWt);
4217 //
4218 // setConnection(grpSrc, grpDest, nid, nid2, weight, maxWt, delay, connectConfigMap[connId].connProp, connId);// info->connId);
4219 // connectConfigMap[connId].numberOfConnections++;
4220 // if(delay > connectConfigMap[connId].maxDelay) {
4221 // connectConfigMap[connId].maxDelay = delay;
4222 // }
4223 // }
4224 // }
4225 // }
4226 //
4227 // groupInfo[grpSrc].sumPostConn += connectConfigMap[connId].numberOfConnections;
4228 // groupInfo[grpDest].sumPreConn += connectConfigMap[connId].numberOfConnections;
4229 //}
4230 
void SNN::deleteRuntimeData() {
	// Release the runtime data of every active local network. GPU partitions
	// (netId < CPU_RUNTIME_BASE) are freed directly; CPU partitions are freed
	// on core-pinned worker threads on Linux/Mac, or inline on Windows/Apple.
	// FIXME: assert simulation use GPU first
	// wait for kernels to complete
#ifndef __NO_CUDA__
	// NOTE(review): cudaThreadSynchronize() is deprecated in the CUDA runtime
	// API; cudaDeviceSynchronize() is the drop-in replacement — confirm the
	// minimum supported CUDA version before switching.
	CUDA_CHECK_ERRORS(cudaThreadSynchronize());
#endif

	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	pthread_t threads[numCores + 1]; // 1 additional array size if numCores == 0, it may work though bad practice
	cpu_set_t cpus;
	ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
	int threadCount = 0;
	#endif

	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			if (netId < CPU_RUNTIME_BASE) // GPU runtime
				deleteRuntimeData_GPU(netId);
			else{ // CPU runtime
	#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
				deleteRuntimeData_CPU(netId);
	#else // Linux or MAC
				// pin each worker thread to a core, round-robin over NUM_CPU_CORES
				pthread_attr_t attr;
				pthread_attr_init(&attr);
				CPU_ZERO(&cpus);
				CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
				pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

				// only netId is meaningful for deletion; the remaining fields
				// of ThreadStruct are zeroed placeholders
				argsThreadRoutine[threadCount].snn_pointer = this;
				argsThreadRoutine[threadCount].netId = netId;
				argsThreadRoutine[threadCount].lGrpId = 0;
				argsThreadRoutine[threadCount].startIdx = 0;
				argsThreadRoutine[threadCount].endIdx = 0;
				argsThreadRoutine[threadCount].GtoLOffset = 0;

				pthread_create(&threads[threadCount], &attr, &SNN::helperDeleteRuntimeData_CPU, (void*)&argsThreadRoutine[threadCount]);
				pthread_attr_destroy(&attr);
				threadCount++;
	#endif
			}
		}
	}

	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
	// join all the threads
	for (int i=0; i<threadCount; i++){
		pthread_join(threads[i], NULL);
	}
	#endif

#ifndef __NO_CUDA__
	CUDA_DELETE_TIMER(timer);
#endif
}
4285 
4286 // delete all objects (CPU and GPU side)
4287 void SNN::deleteObjects() {
4288  if (simulatorDeleted)
4289  return;
4290 
4291  printSimSummary();
4292 
4293  // deallocate objects
4294  resetMonitors(true);
4295  resetConnectionConfigs(true);
4296 
4297  // delete manager runtime data
4298  deleteManagerRuntimeData();
4299 
4300  deleteRuntimeData();
4301 
4302  // fclose file streams, unless in custom mode
4303  if (loggerMode_ != CUSTOM) {
4304  // don't fclose if it's stdout or stderr, otherwise they're gonna stay closed for the rest of the process
4305  if (fpInf_ != NULL && fpInf_ != stdout && fpInf_ != stderr)
4306  fclose(fpInf_);
4307  if (fpErr_ != NULL && fpErr_ != stdout && fpErr_ != stderr)
4308  fclose(fpErr_);
4309  if (fpDeb_ != NULL && fpDeb_ != stdout && fpDeb_ != stderr)
4310  fclose(fpDeb_);
4311  if (fpLog_ != NULL && fpLog_ != stdout && fpLog_ != stderr)
4312  fclose(fpLog_);
4313  }
4314 
4315  simulatorDeleted = true;
4316 }
4317 
4318 void SNN::findMaxNumSynapsesGroups(int* _maxNumPostSynGrp, int* _maxNumPreSynGrp) {
4319  *_maxNumPostSynGrp = 0;
4320  *_maxNumPreSynGrp = 0;
4321 
4322  // scan all the groups and find the required information
4323  for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
4324  // find the values for maximum postsynaptic length
4325  // and maximum pre-synaptic length
4326  if (groupConfigMDMap[gGrpId].numPostSynapses > *_maxNumPostSynGrp)
4327  *_maxNumPostSynGrp = groupConfigMDMap[gGrpId].numPostSynapses;
4328  if (groupConfigMDMap[gGrpId].numPreSynapses > *_maxNumPreSynGrp)
4329  *_maxNumPreSynGrp = groupConfigMDMap[gGrpId].numPreSynapses;
4330  }
4331 }
4332 
4333 void SNN::findMaxNumSynapsesNeurons(int _netId, int& _maxNumPostSynN, int& _maxNumPreSynN) {
4334  int *tempNpre, *tempNpost;
4335  int nSrc, nDest, numNeurons;
4336  std::map<int, int> globalToLocalOffset;
4337 
4338  numNeurons = networkConfigs[_netId].numNAssigned;
4339  tempNpre = new int[numNeurons];
4340  tempNpost = new int[numNeurons];
4341  memset(tempNpre, 0, sizeof(int) * numNeurons);
4342  memset(tempNpost, 0, sizeof(int) * numNeurons);
4343 
4344  // load offset between global neuron id and local neuron id
4345  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[_netId].begin(); grpIt != groupPartitionLists[_netId].end(); grpIt++) {
4346  globalToLocalOffset[grpIt->gGrpId] = grpIt->GtoLOffset;
4347  }
4348 
4349  // calculate number of pre- and post- connections of each neuron
4350  for (std::list<ConnectionInfo>::iterator connIt = connectionLists[_netId].begin(); connIt != connectionLists[_netId].end(); connIt++) {
4351  nSrc = connIt->nSrc + globalToLocalOffset[connIt->grpSrc];
4352  nDest = connIt->nDest + globalToLocalOffset[connIt->grpDest];
4353  assert(nSrc < numNeurons); assert(nDest < numNeurons);
4354  tempNpost[nSrc]++;
4355  tempNpre[nDest]++;
4356  }
4357 
4358  // find out the maximum number of pre- and post- connections among neurons in a local network
4359  _maxNumPostSynN = 0;
4360  _maxNumPreSynN = 0;
4361  for (int nId = 0; nId < networkConfigs[_netId].numN; nId++) {
4362  if (tempNpost[nId] > _maxNumPostSynN) _maxNumPostSynN = tempNpost[nId];
4363  if (tempNpre[nId] > _maxNumPreSynN) _maxNumPreSynN = tempNpre[nId];
4364  }
4365 
4366  delete [] tempNpre;
4367  delete [] tempNpost;
4368 }
4369 
4370 void SNN::findMaxSpikesD1D2(int _netId, unsigned int& _maxSpikesD1, unsigned int& _maxSpikesD2) {
4371  _maxSpikesD1 = 0; _maxSpikesD2 = 0;
4372  for(std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[_netId].begin(); grpIt != groupPartitionLists[_netId].end(); grpIt++) {
4373  if (grpIt->maxOutgoingDelay == 1)
4374  _maxSpikesD1 += (groupConfigMap[grpIt->gGrpId].numN * NEURON_MAX_FIRING_RATE);
4375  else
4376  _maxSpikesD2 += (groupConfigMap[grpIt->gGrpId].numN * NEURON_MAX_FIRING_RATE);
4377  }
4378 }
4379 
4380 void SNN::findNumN(int _netId, int& _numN, int& _numNExternal, int& _numNAssigned,
4381  int& _numNReg, int& _numNExcReg, int& _numNInhReg,
4382  int& _numNPois, int& _numNExcPois, int& _numNInhPois) {
4383  _numN = 0; _numNExternal = 0; _numNAssigned = 0;
4384  _numNReg = 0; _numNExcReg = 0; _numNInhReg = 0;
4385  _numNPois = 0; _numNExcPois = 0; _numNInhPois = 0;
4386  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[_netId].begin(); grpIt != groupPartitionLists[_netId].end(); grpIt++) {
4387  int sizeN = groupConfigMap[grpIt->gGrpId].numN;
4388  unsigned int type = groupConfigMap[grpIt->gGrpId].type;
4389  if (IS_EXCITATORY_TYPE(type) && (type & POISSON_NEURON) && grpIt->netId == _netId) {
4390  _numN += sizeN; _numNPois += sizeN; _numNExcPois += sizeN;
4391  } else if (IS_INHIBITORY_TYPE(type) && (type & POISSON_NEURON) && grpIt->netId == _netId) {
4392  _numN += sizeN; _numNPois += sizeN; _numNInhPois += sizeN;
4393  } else if (IS_EXCITATORY_TYPE(type) && !(type & POISSON_NEURON) && grpIt->netId == _netId) {
4394  _numN += sizeN; _numNReg += sizeN; _numNExcReg += sizeN;
4395  } else if (IS_INHIBITORY_TYPE(type) && !(type & POISSON_NEURON) && grpIt->netId == _netId) {
4396  _numN += sizeN; _numNReg += sizeN; _numNInhReg += sizeN;
4397  } else if (grpIt->netId != _netId) {
4398  _numNExternal += sizeN;
4399  } else {
4400  KERNEL_ERROR("Can't find catagory for the group [%d] ", grpIt->gGrpId);
4401  exitSimulation(-1);
4402  }
4403  _numNAssigned += sizeN;
4404  }
4405 
4406  assert(_numNReg == _numNExcReg + _numNInhReg);
4407  assert(_numNPois == _numNExcPois + _numNInhPois);
4408  assert(_numN == _numNReg + _numNPois);
4409  assert(_numNAssigned == _numN + _numNExternal);
4410 }
4411 
4412 void SNN::findNumNSpikeGenAndOffset(int _netId) {
4413  networkConfigs[_netId].numNSpikeGen = 0;
4414 
4415  for(int lGrpId = 0; lGrpId < networkConfigs[_netId].numGroups; lGrpId++) {
4416  if (_netId == groupConfigs[_netId][lGrpId].netId && groupConfigs[_netId][lGrpId].isSpikeGenerator && groupConfigs[_netId][lGrpId].isSpikeGenFunc) {
4417  groupConfigs[_netId][lGrpId].Noffset = networkConfigs[_netId].numNSpikeGen;
4418  networkConfigs[_netId].numNSpikeGen += groupConfigs[_netId][lGrpId].numN;
4419  }
4420  }
4421 
4422  assert(networkConfigs[_netId].numNSpikeGen <= networkConfigs[_netId].numNPois);
4423 }
4424 
4425 void SNN::findNumSynapsesNetwork(int _netId, int& _numPostSynNet, int& _numPreSynNet) {
4426  _numPostSynNet = 0;
4427  _numPreSynNet = 0;
4428 
4429  for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[_netId].begin(); grpIt != groupPartitionLists[_netId].end(); grpIt++) {
4430  _numPostSynNet += grpIt->numPostSynapses;
4431  _numPreSynNet += grpIt->numPreSynapses;
4432  assert(_numPostSynNet < INT_MAX);
4433  assert(_numPreSynNet < INT_MAX);
4434  }
4435 
4436  assert(_numPreSynNet == _numPostSynNet);
4437 }
4438 
4439 void SNN::fetchGroupState(int netId, int lGrpId) {
4440  if (netId < CPU_RUNTIME_BASE)
4441  copyGroupState(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false);
4442  else
4443  copyGroupState(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false);
4444 }
4445 
4446 void SNN::fetchWeightState(int netId, int lGrpId) {
4447  if (netId < CPU_RUNTIME_BASE)
4448  copyWeightState(netId, lGrpId, cudaMemcpyDeviceToHost);
4449  else
4450  copyWeightState(netId, lGrpId);
4451 }
4452 
4458 void SNN::fetchNeuronSpikeCount (int gGrpId) {
4459  if (gGrpId == ALL) {
4460  for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
4461  fetchNeuronSpikeCount(gGrpId);
4462  }
4463  } else {
4464  int netId = groupConfigMDMap[gGrpId].netId;
4465  int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
4466  int LtoGOffset = groupConfigMDMap[gGrpId].LtoGOffset;
4467 
4468  if (netId < CPU_RUNTIME_BASE)
4469  copyNeuronSpikeCount(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, LtoGOffset);
4470  else
4471  copyNeuronSpikeCount(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false, LtoGOffset);
4472  }
4473 }
4474 
void SNN::fetchSTPState(int gGrpId) {
	// Intentionally a no-op: STP state is currently not copied back from the
	// runtimes. Kept so callers have a uniform fetch* interface.
}
4477 
4483 void SNN::fetchConductanceAMPA(int gGrpId) {
4484  if (gGrpId == ALL) {
4485  for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
4486  fetchConductanceAMPA(gGrpId);
4487  }
4488  } else {
4489  int netId = groupConfigMDMap[gGrpId].netId;
4490  int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
4491  int LtoGOffset = groupConfigMDMap[gGrpId].LtoGOffset;
4492 
4493  if (netId < CPU_RUNTIME_BASE)
4494  copyConductanceAMPA(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, LtoGOffset);
4495  else
4496  copyConductanceAMPA(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false, LtoGOffset);
4497  }
4498 }
4499 
4505 void SNN::fetchConductanceNMDA(int gGrpId) {
4506  if (gGrpId == ALL) {
4507  for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
4508  fetchConductanceNMDA(gGrpId);
4509  }
4510  } else {
4511  int netId = groupConfigMDMap[gGrpId].netId;
4512  int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
4513  int LtoGOffset = groupConfigMDMap[gGrpId].LtoGOffset;
4514 
4515  if (netId < CPU_RUNTIME_BASE)
4516  copyConductanceNMDA(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, LtoGOffset);
4517  else
4518  copyConductanceNMDA(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false, LtoGOffset);
4519  }
4520 }
4521 
4527 void SNN::fetchConductanceGABAa(int gGrpId) {
4528  if (gGrpId == ALL) {
4529  for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
4530  fetchConductanceGABAa(gGrpId);
4531  }
4532  } else {
4533  int netId = groupConfigMDMap[gGrpId].netId;
4534  int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
4535  int LtoGOffset = groupConfigMDMap[gGrpId].LtoGOffset;
4536 
4537  if (netId < CPU_RUNTIME_BASE)
4538  copyConductanceGABAa(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, LtoGOffset);
4539  else
4540  copyConductanceGABAa(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false, LtoGOffset);
4541  }
4542 }
4543 
4549 void SNN::fetchConductanceGABAb(int gGrpId) {
4550  if (gGrpId == ALL) {
4551  for (int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
4552  fetchConductanceGABAb(gGrpId);
4553  }
4554  } else {
4555  int netId = groupConfigMDMap[gGrpId].netId;
4556  int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
4557  int LtoGOffset = groupConfigMDMap[gGrpId].LtoGOffset;
4558 
4559  if (netId < CPU_RUNTIME_BASE)
4560  copyConductanceGABAb(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, LtoGOffset);
4561  else
4562  copyConductanceGABAb(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false, LtoGOffset);
4563  }
4564 }
4565 
4566 
4567 void SNN::fetchGrpIdsLookupArray(int netId) {
4568  if (netId < CPU_RUNTIME_BASE)
4569  copyGrpIdsLookupArray(netId, cudaMemcpyDeviceToHost);
4570  else
4571  copyGrpIdsLookupArray(netId);
4572 }
4573 
4574 void SNN::fetchConnIdsLookupArray(int netId) {
4575  if (netId < CPU_RUNTIME_BASE)
4576  copyConnIdsLookupArray(netId, cudaMemcpyDeviceToHost);
4577  else
4578  copyConnIdsLookupArray(netId);
4579 }
4580 
4581 void SNN::fetchLastSpikeTime(int netId) {
4582  if (netId < CPU_RUNTIME_BASE)
4583  copyLastSpikeTime(netId, cudaMemcpyDeviceToHost);
4584  else
4585  copyLastSpikeTime(netId);
4586 }
4587 
4588 void SNN::fetchPreConnectionInfo(int netId) {
4589  if (netId < CPU_RUNTIME_BASE)
4590  copyPreConnectionInfo(netId, ALL, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false);
4591  else
4592  copyPreConnectionInfo(netId, ALL, &managerRuntimeData, &runtimeData[netId], false);
4593 }
4594 
4595 void SNN::fetchPostConnectionInfo(int netId) {
4596  if (netId < CPU_RUNTIME_BASE)
4597  copyPostConnectionInfo(netId, ALL, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false);
4598  else
4599  copyPostConnectionInfo(netId, ALL, &managerRuntimeData, &runtimeData[netId], false);
4600 }
4601 
4602 void SNN::fetchSynapseState(int netId) {
4603  if (netId < CPU_RUNTIME_BASE)
4604  copySynapseState(netId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false);
4605  else
4606  copySynapseState(netId, &managerRuntimeData, &runtimeData[netId], false);
4607 }
4608 
4609 
4613 void SNN::fetchNetworkSpikeCount() {
4614  unsigned int spikeCountD1, spikeCountD2, spikeCountExtD1, spikeCountExtD2;
4615 
4616  managerRuntimeData.spikeCountD1 = 0;
4617  managerRuntimeData.spikeCountD2 = 0;
4618  managerRuntimeData.spikeCountExtRxD2 = 0;
4619  managerRuntimeData.spikeCountExtRxD1 = 0;
4620  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
4621  if (!groupPartitionLists[netId].empty()) {
4622 
4623  if (netId < CPU_RUNTIME_BASE) {
4624  copyNetworkSpikeCount(netId, cudaMemcpyDeviceToHost,
4625  &spikeCountD1, &spikeCountD2,
4626  &spikeCountExtD1, &spikeCountExtD2);
4627  //printf("netId:%d, D1:%d/D2:%d, extD1:%d/D2:%d\n", netId, spikeCountD1, spikeCountD2, spikeCountExtD1, spikeCountExtD2);
4628  } else {
4629  copyNetworkSpikeCount(netId,
4630  &spikeCountD1, &spikeCountD2,
4631  &spikeCountExtD1, &spikeCountExtD2);
4632  //printf("netId:%d, D1:%d/D2:%d, extD1:%d/D2:%d\n", netId, spikeCountD1, spikeCountD2, spikeCountExtD1, spikeCountExtD2);
4633  }
4634 
4635  managerRuntimeData.spikeCountD2 += spikeCountD2 - spikeCountExtD2;
4636  managerRuntimeData.spikeCountD1 += spikeCountD1 - spikeCountExtD1;
4637  managerRuntimeData.spikeCountExtRxD2 += spikeCountExtD2;
4638  managerRuntimeData.spikeCountExtRxD1 += spikeCountExtD1;
4639  }
4640  }
4641 
4642  managerRuntimeData.spikeCount = managerRuntimeData.spikeCountD1 + managerRuntimeData.spikeCountD2;
4643 }
4644 
4645 void SNN::fetchSpikeTables(int netId) {
4646  if (netId < CPU_RUNTIME_BASE)
4647  copySpikeTables(netId, cudaMemcpyDeviceToHost);
4648  else
4649  copySpikeTables(netId);
4650 }
4651 
4652 void SNN::fetchNeuronStateBuffer(int netId, int lGrpId) {
4653  if (netId < CPU_RUNTIME_BASE)
4654  copyNeuronStateBuffer(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false);
4655  else
4656  copyNeuronStateBuffer(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], false);
4657 }
4658 
4659 void SNN::fetchExtFiringTable(int netId) {
4660  assert(netId < MAX_NET_PER_SNN);
4661 
4662  if (netId < CPU_RUNTIME_BASE) { // GPU runtime
4663  copyExtFiringTable(netId, cudaMemcpyDeviceToHost);
4664  } else { // CPU runtime
4665  copyExtFiringTable(netId);
4666  }
4667 }
4668 
4669 void SNN::fetchTimeTable(int netId) {
4670  assert(netId < MAX_NET_PER_SNN);
4671 
4672  if (netId < CPU_RUNTIME_BASE) { // GPU runtime
4673  copyTimeTable(netId, cudaMemcpyDeviceToHost);
4674  } else {
4675  copyTimeTable(netId, true);
4676  }
4677 }
4678 
4679 void SNN::writeBackTimeTable(int netId) {
4680  assert(netId < MAX_NET_PER_SNN);
4681 
4682  if (netId < CPU_RUNTIME_BASE) { // GPU runtime
4683  copyTimeTable(netId, cudaMemcpyHostToDevice);
4684  } else {
4685  copyTimeTable(netId, false);
4686  }
4687 }
4688 
void SNN::transferSpikes(void* dest, int destNetId, void* src, int srcNetId, int size) {
	// Move 'size' bytes of spike data between two local networks. The copy
	// primitive depends on where each side runs: GPU<->GPU uses a peer copy,
	// GPU<->CPU a cudaMemcpy, and CPU<->CPU a plain memcpy. netIds below
	// CPU_RUNTIME_BASE are GPU runtimes; all others are CPU runtimes.
	// NOTE(review): netId is passed straight to the CUDA API as the device
	// ordinal — assumes netId == CUDA device id for GPU runtimes; confirm.
#ifndef __NO_CUDA__
	if (srcNetId < CPU_RUNTIME_BASE && destNetId < CPU_RUNTIME_BASE) {
		// GPU -> GPU: peer-to-peer device copy
		checkAndSetGPUDevice(destNetId);
		CUDA_CHECK_ERRORS(cudaMemcpyPeer(dest, destNetId, src, srcNetId, size));
	} else if (srcNetId >= CPU_RUNTIME_BASE && destNetId < CPU_RUNTIME_BASE) {
		// CPU -> GPU
		checkAndSetGPUDevice(destNetId);
		CUDA_CHECK_ERRORS(cudaMemcpy(dest, src, size, cudaMemcpyHostToDevice));
	} else if (srcNetId < CPU_RUNTIME_BASE && destNetId >= CPU_RUNTIME_BASE) {
		// GPU -> CPU
		checkAndSetGPUDevice(srcNetId);
		CUDA_CHECK_ERRORS(cudaMemcpy(dest, src, size, cudaMemcpyDeviceToHost));
	} else if(srcNetId >= CPU_RUNTIME_BASE && destNetId >= CPU_RUNTIME_BASE) {
		// CPU -> CPU
		memcpy(dest, src, size);
	}
#else
	// CUDA-free build: only CPU runtimes can exist
	assert(srcNetId >= CPU_RUNTIME_BASE && destNetId >= CPU_RUNTIME_BASE);
	memcpy(dest, src, size);
#endif
}
4708 
4709 void SNN::convertExtSpikesD2(int netId, int startIdx, int endIdx, int GtoLOffset) {
4710  if (netId < CPU_RUNTIME_BASE)
4711  convertExtSpikesD2_GPU(netId, startIdx, endIdx, GtoLOffset);
4712  else
4713  convertExtSpikesD2_CPU(netId, startIdx, endIdx, GtoLOffset);
4714 }
4715 
4716 void SNN::convertExtSpikesD1(int netId, int startIdx, int endIdx, int GtoLOffset) {
4717  if (netId < CPU_RUNTIME_BASE)
4718  convertExtSpikesD1_GPU(netId, startIdx, endIdx, GtoLOffset);
4719  else
4720  convertExtSpikesD1_CPU(netId, startIdx, endIdx, GtoLOffset);
4721 }
4722 
void SNN::routeSpikes() {
	// Deliver spikes that cross local-network boundaries: for every entry in
	// the spike routing table, append the source network's external firing
	// entries (D1 = 1 ms delay, D2 = longer delays) to the destination's
	// firing tables, convert their neuron ids to the destination's local id
	// space, and advance the destination's time-table write positions.
	int firingTableIdxD2, firingTableIdxD1;
	int GtoLOffset;

	for (std::list<RoutingTableEntry>::iterator rteItr = spikeRoutingTable.begin(); rteItr != spikeRoutingTable.end(); rteItr++) {
		int srcNetId = rteItr->srcNetId;
		int destNetId = rteItr->destNetId;

		// pull the source network's external firing tables into the manager
		fetchExtFiringTable(srcNetId);

		// current write positions in the destination's firing tables
		fetchTimeTable(destNetId);
		firingTableIdxD2 = managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1];
		firingTableIdxD1 = managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1];
		//KERNEL_DEBUG("GPU1 D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2);
		//printf("srcNetId %d,destNetId %d, D1:%d/D2:%d\n", srcNetId, destNetId, firingTableIdxD1, firingTableIdxD2);

		#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
		// worst case one D1 and one D2 conversion thread per source group
		pthread_t threads[(2 * networkConfigs[srcNetId].numGroups) + 1]; // 1 additional array size if numCores == 0, it may work though bad practice
		cpu_set_t cpus;
		ThreadStruct argsThreadRoutine[(2 * networkConfigs[srcNetId].numGroups) + 1]; // same as above, +1 array size
		int threadCount = 0;
		#endif

		for (int lGrpId = 0; lGrpId < networkConfigs[srcNetId].numGroups; lGrpId++) {
			// ----- D2 spikes (delay > 1 ms) of this source group -----
			if (groupConfigs[srcNetId][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD2[lGrpId] > 0) {
				// search GtoLOffset of the neural group at destination local network
				bool isFound = false;
				for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[destNetId].begin(); grpIt != groupPartitionLists[destNetId].end(); grpIt++) {
					if (grpIt->gGrpId == groupConfigs[srcNetId][lGrpId].gGrpId) {
						GtoLOffset = grpIt->GtoLOffset;
						isFound = true;
						break;
					}
				}

				if (isFound) {
					// append the group's external D2 spikes to the destination table
					transferSpikes(runtimeData[destNetId].firingTableD2 + firingTableIdxD2, destNetId,
						managerRuntimeData.extFiringTableD2[lGrpId], srcNetId,
						sizeof(int) * managerRuntimeData.extFiringTableEndIdxD2[lGrpId]);

					if (destNetId < CPU_RUNTIME_BASE){
						// GPU runtime: convert ids synchronously on the device
						convertExtSpikesD2_GPU(destNetId, firingTableIdxD2,
							firingTableIdxD2 + managerRuntimeData.extFiringTableEndIdxD2[lGrpId],
							GtoLOffset); // [StartIdx, EndIdx)
					}
					else{// CPU runtime
					#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
						convertExtSpikesD2_CPU(destNetId, firingTableIdxD2,
							firingTableIdxD2 + managerRuntimeData.extFiringTableEndIdxD2[lGrpId],
							GtoLOffset); // [StartIdx, EndIdx)
					#else // Linux or MAC
						// run the conversion on a core-pinned worker thread
						pthread_attr_t attr;
						pthread_attr_init(&attr);
						CPU_ZERO(&cpus);
						CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
						pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

						argsThreadRoutine[threadCount].snn_pointer = this;
						argsThreadRoutine[threadCount].netId = destNetId;
						argsThreadRoutine[threadCount].lGrpId = 0;
						argsThreadRoutine[threadCount].startIdx = firingTableIdxD2;
						argsThreadRoutine[threadCount].endIdx = firingTableIdxD2 + managerRuntimeData.extFiringTableEndIdxD2[lGrpId];
						argsThreadRoutine[threadCount].GtoLOffset = GtoLOffset;

						pthread_create(&threads[threadCount], &attr, &SNN::helperConvertExtSpikesD2_CPU, (void*)&argsThreadRoutine[threadCount]);
						pthread_attr_destroy(&attr);
						threadCount++;
					#endif
					}

					// advance the destination's D2 write position
					firingTableIdxD2 += managerRuntimeData.extFiringTableEndIdxD2[lGrpId];
				}
			}

			// ----- D1 spikes (1 ms delay) of this source group -----
			if (groupConfigs[srcNetId][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD1[lGrpId] > 0) {
				// search GtoLOffset of the neural group at destination local network
				bool isFound = false;
				for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[destNetId].begin(); grpIt != groupPartitionLists[destNetId].end(); grpIt++) {
					if (grpIt->gGrpId == groupConfigs[srcNetId][lGrpId].gGrpId) {
						GtoLOffset = grpIt->GtoLOffset;
						isFound = true;
						break;
					}
				}

				if (isFound) {
					// append the group's external D1 spikes to the destination table
					transferSpikes(runtimeData[destNetId].firingTableD1 + firingTableIdxD1, destNetId,
						managerRuntimeData.extFiringTableD1[lGrpId], srcNetId,
						sizeof(int) * managerRuntimeData.extFiringTableEndIdxD1[lGrpId]);
					if (destNetId < CPU_RUNTIME_BASE){
						// GPU runtime: convert ids synchronously on the device
						convertExtSpikesD1_GPU(destNetId, firingTableIdxD1,
							firingTableIdxD1 + managerRuntimeData.extFiringTableEndIdxD1[lGrpId],
							GtoLOffset); // [StartIdx, EndIdx)
					}
					else{// CPU runtime
					#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
						convertExtSpikesD1_CPU(destNetId, firingTableIdxD1,
							firingTableIdxD1 + managerRuntimeData.extFiringTableEndIdxD1[lGrpId],
							GtoLOffset); // [StartIdx, EndIdx)
					#else // Linux or MAC
						// run the conversion on a core-pinned worker thread
						pthread_attr_t attr;
						pthread_attr_init(&attr);
						CPU_ZERO(&cpus);
						CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
						pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

						argsThreadRoutine[threadCount].snn_pointer = this;
						argsThreadRoutine[threadCount].netId = destNetId;
						argsThreadRoutine[threadCount].lGrpId = 0;
						argsThreadRoutine[threadCount].startIdx = firingTableIdxD1;
						argsThreadRoutine[threadCount].endIdx = firingTableIdxD1 + managerRuntimeData.extFiringTableEndIdxD1[lGrpId];
						argsThreadRoutine[threadCount].GtoLOffset = GtoLOffset;

						pthread_create(&threads[threadCount], &attr, &SNN::helperConvertExtSpikesD1_CPU, (void*)&argsThreadRoutine[threadCount]);
						pthread_attr_destroy(&attr);
						threadCount++;
					#endif
					}
					// advance the destination's D1 write position
					firingTableIdxD1 += managerRuntimeData.extFiringTableEndIdxD1[lGrpId];
				}
			}
			//KERNEL_DEBUG("GPU1 New D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2);
		}

		#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
		// join all the threads
		for (int i=0; i<threadCount; i++){
			pthread_join(threads[i], NULL);
		}
		#endif

		// publish the new write positions back to the destination runtime
		managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD2;
		managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD1;
		writeBackTimeTable(destNetId);
	}
}
4859 
//We need to pass the neuron id (nid) and the grpId just for the case when we want to
4861 //ramp up/down the weights. In that case we need to set the weights of each synapse
4862 //depending on their nid (their position with respect to one another). -- KDC
float SNN::generateWeight(int connProp, float initWt, float maxWt, int nid, int grpId) {
	// Returns the initial weight for a new synapse. The random and
	// ramp-up/ramp-down initialization modes below are currently disabled, so
	// every synapse simply starts at initWt; connProp/maxWt/nid/grpId are
	// kept for the disabled code paths and for interface compatibility.
	float actWts;
	//bool setRandomWeights = GET_INITWTS_RANDOM(connProp);
	//bool setRampDownWeights = GET_INITWTS_RAMPDOWN(connProp);
	//bool setRampUpWeights = GET_INITWTS_RAMPUP(connProp);

	//if (setRandomWeights)
	//	actWts = initWt * drand48();
	//else if (setRampUpWeights)
	//	actWts = (initWt + ((nid - groupConfigs[0][grpId].StartN) * (maxWt - initWt) / groupConfigs[0][grpId].SizeN));
	//else if (setRampDownWeights)
	//	actWts = (maxWt - ((nid - groupConfigs[0][grpId].StartN) * (maxWt - initWt) / groupConfigs[0][grpId].SizeN));
	//else
	actWts = initWt;

	return actWts;
}
4881 
// checks whether a connection ID contains plastic synapses (a map lookup, O(log #connections))
4883 bool SNN::isConnectionPlastic(short int connId) {
4884  assert(connId != ALL);
4885  assert(connId < numConnections);
4886 
4887  return GET_FIXED_PLASTIC(connectConfigMap[connId].connProp);
4888 }
4889 
4890 // FIXME: distinguish the function call at CONFIG_STATE and SETUP_STATE, where groupConfigs[0][] might not be available
4891 // or groupConfigMap is not sync with groupConfigs[0][]
4892 // returns whether group has homeostasis enabled
4894  assert(grpId>=0 && grpId<getNumGroups());
4895  return (groupConfigMap[grpId].homeoConfig.WithHomeostasis);
4896 }
4897 
4898 // performs various verification checkups before building the network
4899 void SNN::verifyNetwork() {
4900  // make sure number of neuron parameters have been accumulated correctly
4901  // NOTE: this used to be updateParameters
4902  //verifyNumNeurons();
4903 
4904  // make sure compartment config is valid
4905  verifyCompartments();
4906 
4907  // make sure STDP post-group has some incoming plastic connections
4908  verifySTDP();
4909 
4910  // make sure every group with homeostasis also has STDP
4911  verifyHomeostasis();
4912 
4913  // make sure the max delay is within bound
4914  assert(glbNetworkConfig.maxDelay <= MAX_SYN_DELAY);
4915 
4916  // make sure there is sufficient buffer
4917  //if ((networkConfigs[0].maxSpikesD1 + networkConfigs[0].maxSpikesD2) < (numNExcReg + numNInhReg + numNPois) * UNKNOWN_NEURON_MAX_FIRING_RATE) {
4918  // KERNEL_ERROR("Insufficient amount of buffer allocated...");
4919  // exitSimulation(1);
4920  //}
4921 
4922  //make sure the number of pre- and post-connection does not exceed the limitation
4923  //if (maxNumPostSynGrp > MAX_NUM_POST_SYN) {
4924  // for (int g = 0; g < numGroups; g++) {
4925  // if (groupConfigMap[g].numPostSynapses>MAX_NUM_POST_SYN)
4926  // KERNEL_ERROR("Grp: %s(%d) has too many output synapses (%d), max %d.",groupInfo[g].Name.c_str(),g,
4927  // groupConfigMap[g].numPostSynapses,MAX_NUM_POST_SYN);
4928  // }
4929  // assert(maxNumPostSynGrp <= MAX_NUM_POST_SYN);
4930  //}
4931 
4932  //if (maxNumPreSynGrp > MAX_NUM_PRE_SYN) {
4933  // for (int g = 0; g < numGroups; g++) {
4934  // if (groupConfigMap[g].numPreSynapses>MAX_NUM_PRE_SYN)
4935  // KERNEL_ERROR("Grp: %s(%d) has too many input synapses (%d), max %d.",groupInfo[g].Name.c_str(),g,
4936  // groupConfigMap[g].numPreSynapses,MAX_NUM_PRE_SYN);
4937  // }
4938  // assert(maxNumPreSynGrp <= MAX_NUM_PRE_SYN);
4939  //}
4940 
4941  // make sure maxDelay == 1 if STP is enableed
4942  // \FIXME: need to figure out STP buffer for delays > 1
4943  if (sim_with_stp && glbNetworkConfig.maxDelay > 1) {
4944  KERNEL_ERROR("STP with delays > 1 ms is currently not supported.");
4945  exitSimulation(1);
4946  }
4947 
4948  if (glbNetworkConfig.maxDelay > MAX_SYN_DELAY) {
4949  KERNEL_ERROR("You are using a synaptic delay (%d) greater than MAX_SYN_DELAY defined in config.h", glbNetworkConfig.maxDelay);
4950  exitSimulation(1);
4951  }
4952 }
4953 
4954 void SNN::verifyCompartments() {
4955  for (std::map<int, compConnectConfig>::iterator it = compConnectConfigMap.begin(); it != compConnectConfigMap.end(); it++)
4956  {
4957  int grpLower = it->second.grpSrc;
4958  int grpUpper = it->second.grpDest;
4959 
4960  // make sure groups are compartmentally enabled
4961  if (!groupConfigMap[grpLower].withCompartments) {
4962  KERNEL_ERROR("Group %s(%d) is not compartmentally enabled, cannot be part of a compartmental connection.",
4963  groupConfigMap[grpLower].grpName.c_str(), grpLower);
4964  exitSimulation(1);
4965  }
4966  if (!groupConfigMap[grpUpper].withCompartments) {
4967  KERNEL_ERROR("Group %s(%d) is not compartmentally enabled, cannot be part of a compartmental connection.",
4968  groupConfigMap[grpUpper].grpName.c_str(), grpUpper);
4969  exitSimulation(1);
4970  }
4971  }
4972 }
4973 
4974 // checks whether STDP is set on a post-group with incoming plastic connections
4975 void SNN::verifySTDP() {
4976  for (int gGrpId=0; gGrpId<getNumGroups(); gGrpId++) {
4977  if (groupConfigMap[gGrpId].stdpConfig.WithSTDP) {
4978  // for each post-group, check if any of the incoming connections are plastic
4979  bool isAnyPlastic = false;
4980  for (std::map<int, ConnectConfig>::iterator it = connectConfigMap.begin(); it != connectConfigMap.end(); it++) {
4981  if (it->second.grpDest == gGrpId) {
4982  // get syn wt type from connection property
4983  isAnyPlastic |= GET_FIXED_PLASTIC(it->second.connProp);
4984  if (isAnyPlastic) {
4985  // at least one plastic connection found: break while
4986  break;
4987  }
4988  }
4989  }
4990  if (!isAnyPlastic) {
4991  KERNEL_ERROR("If STDP on group %d (%s) is set, group must have some incoming plastic connections.",
4992  gGrpId, groupConfigMap[gGrpId].grpName.c_str());
4993  exitSimulation(1);
4994  }
4995  }
4996  }
4997 }
4998 
4999 // checks whether every group with Homeostasis also has STDP
5000 void SNN::verifyHomeostasis() {
5001  for (int gGrpId=0; gGrpId<getNumGroups(); gGrpId++) {
5002  if (groupConfigMap[gGrpId].homeoConfig.WithHomeostasis) {
5003  if (!groupConfigMap[gGrpId].stdpConfig.WithSTDP) {
5004  KERNEL_ERROR("If homeostasis is enabled on group %d (%s), then STDP must be enabled, too.",
5005  gGrpId, groupConfigMap[gGrpId].grpName.c_str());
5006  exitSimulation(1);
5007  }
5008  }
5009  }
5010 }
5011 
5013 //void SNN::verifyNumNeurons() {
5014 // int nExcPois = 0;
5015 // int nInhPois = 0;
5016 // int nExcReg = 0;
5017 // int nInhReg = 0;
5018 //
5019 // // scan all the groups and find the required information
5020 // // about the group (numN, numPostSynapses, numPreSynapses and others).
5021 // for(int g=0; g<numGroups; g++) {
5022 // if (groupConfigMap[g].Type==UNKNOWN_NEURON) {
5023 // KERNEL_ERROR("Unknown group for %d (%s)", g, groupInfo[g].Name.c_str());
5024 // exitSimulation(1);
5025 // }
5026 //
5027 // if (IS_INHIBITORY_TYPE(groupConfigMap[g].Type) && !(groupConfigMap[g].Type & POISSON_NEURON))
5028 // nInhReg += groupConfigMap[g].SizeN;
5029 // else if (IS_EXCITATORY_TYPE(groupConfigMap[g].Type) && !(groupConfigMap[g].Type & POISSON_NEURON))
5030 // nExcReg += groupConfigMap[g].SizeN;
5031 // else if (IS_EXCITATORY_TYPE(groupConfigMap[g].Type) && (groupConfigMap[g].Type & POISSON_NEURON))
5032 // nExcPois += groupConfigMap[g].SizeN;
5033 // else if (IS_INHIBITORY_TYPE(groupConfigMap[g].Type) && (groupConfigMap[g].Type & POISSON_NEURON))
5034 // nInhPois += groupConfigMap[g].SizeN;
5035 // }
5036 //
5037 // // check the newly gathered information with class members
5038 // if (numN != nExcReg+nInhReg+nExcPois+nInhPois) {
5039 // KERNEL_ERROR("nExcReg+nInhReg+nExcPois+nInhPois=%d does not add up to numN=%d",
5040 // nExcReg+nInhReg+nExcPois+nInhPois, numN);
5041 // exitSimulation(1);
5042 // }
5043 // if (numNReg != nExcReg+nInhReg) {
5044 // KERNEL_ERROR("nExcReg+nInhReg=%d does not add up to numNReg=%d", nExcReg+nInhReg, numNReg);
5045 // exitSimulation(1);
5046 // }
5047 // if (numNPois != nExcPois+nInhPois) {
5048 // KERNEL_ERROR("nExcPois+nInhPois=%d does not add up to numNPois=%d", nExcPois+nInhPois, numNPois);
5049 // exitSimulation(1);
5050 // }
5051 //
5052 // //printf("numN=%d == %d\n",numN,nExcReg+nInhReg+nExcPois+nInhPois);
5053 // //printf("numNReg=%d == %d\n",numNReg, nExcReg+nInhReg);
5054 // //printf("numNPois=%d == %d\n",numNPois, nExcPois+nInhPois);
5055 //
5056 // assert(numN <= 1000000);
5057 // assert((numN > 0) && (numN == numNExcReg + numNInhReg + numNPois));
5058 //}
5059 
5060 // \FIXME: not sure where this should go... maybe create some helper file?
5061 bool SNN::isPoint3DinRF(const RadiusRF& radius, const Point3D& pre, const Point3D& post) {
5062  // Note: RadiusRF rad is assumed to be the fanning in to the post neuron. So if the radius is 10 pixels, it means
5063  // that if you look at the post neuron, it will receive input from neurons that code for locations no more than
5064  // 10 pixels away. (The opposite is called a response/stimulus field.)
5065 
5066  double rfDist = getRFDist3D(radius, pre, post);
5067  return (rfDist >= 0.0 && rfDist <= 1.0);
5068 }
5069 
5070 double SNN::getRFDist3D(const RadiusRF& radius, const Point3D& pre, const Point3D& post) {
5071  // Note: RadiusRF rad is assumed to be the fanning in to the post neuron. So if the radius is 10 pixels, it means
5072  // that if you look at the post neuron, it will receive input from neurons that code for locations no more than
5073  // 10 pixels away.
5074 
5075  // ready output argument
5076  // SNN::isPoint3DinRF() will return true (connected) if rfDist e[0.0, 1.0]
5077  double rfDist = -1.0;
5078 
5079  // pre and post are connected in a generic 3D ellipsoid RF if x^2/a^2 + y^2/b^2 + z^2/c^2 <= 1.0, where
5080  // x = pre.x-post.x, y = pre.y-post.y, z = pre.z-post.z
5081  // x < 0 means: connect if y and z satisfy some constraints, but ignore x
5082  // x == 0 means: connect if y and z satisfy some constraints, and enforce pre.x == post.x
5083  if (radius.radX==0 && pre.x!=post.x || radius.radY==0 && pre.y!=post.y || radius.radZ==0 && pre.z!=post.z) {
5084  rfDist = -1.0;
5085  } else {
5086  // 3D ellipsoid: x^2/a^2 + y^2/b^2 + z^2/c^2 <= 1.0
5087  double xTerm = (radius.radX<=0) ? 0.0 : pow(pre.x-post.x,2)/pow(radius.radX,2);
5088  double yTerm = (radius.radY<=0) ? 0.0 : pow(pre.y-post.y,2)/pow(radius.radY,2);
5089  double zTerm = (radius.radZ<=0) ? 0.0 : pow(pre.z-post.z,2)/pow(radius.radZ,2);
5090  rfDist = xTerm + yTerm + zTerm;
5091  }
5092 
5093  return rfDist;
5094 }
5095 
// Partitions the global network into local networks, one per computing
// backend (GPU or CPU core), and assigns local group/neuron ids.
// Passes:
//  1) assign each group to a partition (user-preferred netId or a naive
//     default based on the preferred simulation mode)
//  2) collect connections that are fully local to each partition
//  3) find cross-partition connections: duplicate border groups into the
//     peer partition, record external connections, and build the spike
//     routing table between partitions
//  4) assign local neuron/group ids in a fixed neuron-type order
//  5) generate connections (or load them from file) and print an overview
void SNN::partitionSNN() {
	int numAssignedNeurons[MAX_NET_PER_SNN] = {0};

	// get number of available GPU card(s) in the present machine
	numAvailableGPUs = configGPUDevice();

	for (std::map<int, GroupConfigMD>::iterator grpIt = groupConfigMDMap.begin(); grpIt != groupConfigMDMap.end(); grpIt++) {
		// assign a group to the partition (netId) specified by the user, if any
		int gGrpId = grpIt->second.gGrpId;
		int netId = groupConfigMap[gGrpId].preferredNetId;
		if (netId != ANY) {
			assert(netId > ANY && netId < MAX_NET_PER_SNN);
			grpIt->second.netId = netId;
			numAssignedNeurons[netId] += groupConfigMap[gGrpId].numN;
			groupPartitionLists[netId].push_back(grpIt->second); // Copy by value, create a copy
		} else { // netId == ANY
			// TODO: add callback function that allows users to partition the network themselves
			// FIXME: make sure GPU(s) is available first
			// this pass separates groups into each local network and assigns each group a netId
			if (preferredSimMode_ == CPU_MODE) {
				grpIt->second.netId = CPU_RUNTIME_BASE; // CPU 0
				numAssignedNeurons[CPU_RUNTIME_BASE] += groupConfigMap[gGrpId].numN;
				groupPartitionLists[CPU_RUNTIME_BASE].push_back(grpIt->second); // Copy by value, create a copy
			} else if (preferredSimMode_ == GPU_MODE) {
				grpIt->second.netId = GPU_RUNTIME_BASE; // GPU 0
				numAssignedNeurons[GPU_RUNTIME_BASE] += groupConfigMap[gGrpId].numN;
				groupPartitionLists[GPU_RUNTIME_BASE].push_back(grpIt->second); // Copy by value, create a copy
			} else if (preferredSimMode_ == HYBRID_MODE) {
				// TODO: implement partition algorithm, use naive partition for now (allocate to CPU 0)
				grpIt->second.netId = CPU_RUNTIME_BASE; // CPU 0
				numAssignedNeurons[CPU_RUNTIME_BASE] += groupConfigMap[gGrpId].numN;
				groupPartitionLists[CPU_RUNTIME_BASE].push_back(grpIt->second); // Copy by value, create a copy
			} else {
				KERNEL_ERROR("Unkown simulation mode");
				exitSimulation(-1);
			}
		}

		if (grpIt->second.netId == -1) { // the group was not assigned to any computing backend
			KERNEL_ERROR("Can't assign the group [%d] to any partition", grpIt->second.gGrpId);
			exitSimulation(-1);
		}
	}

	// this pass finds local connections (i.e., connection configs that connect local groups)
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
				if (groupConfigMDMap[connIt->second.grpSrc].netId == netId && groupConfigMDMap[connIt->second.grpDest].netId == netId) {
					localConnectLists[netId].push_back(connectConfigMap[connIt->second.connId]); // Copy by value
				}
			}

			// same criterion for compartmental connections
			//printf("The size of compConnectConfigMap is: %i\n", compConnectConfigMap.size());
			for (std::map<int, compConnectConfig>::iterator connIt = compConnectConfigMap.begin(); connIt != compConnectConfigMap.end(); connIt++) {
				if (groupConfigMDMap[connIt->second.grpSrc].netId == netId && groupConfigMDMap[connIt->second.grpDest].netId == netId) {
					localCompConnectLists[netId].push_back(compConnectConfigMap[connIt->second.connId]); // Copy by value
				}
			}
		}
	}

	// this pass finds external groups and external connections
	spikeRoutingTable.clear();
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			for (std::map<int, ConnectConfig>::iterator connIt = connectConfigMap.begin(); connIt != connectConfigMap.end(); connIt++) {
				int srcNetId = groupConfigMDMap[connIt->second.grpSrc].netId;
				int destNetId = groupConfigMDMap[connIt->second.grpDest].netId;
				if (srcNetId == netId && destNetId != netId) {
					// search the source group in groupPartitionLists and mark it as having external connections
					GroupConfigMD targetGroup;
					std::list<GroupConfigMD>::iterator srcGrpIt, destGrpIt;

					targetGroup.gGrpId = connIt->second.grpSrc;
					srcGrpIt = find(groupPartitionLists[srcNetId].begin(), groupPartitionLists[srcNetId].end(), targetGroup);
					assert(srcGrpIt != groupPartitionLists[srcNetId].end());
					srcGrpIt->hasExternalConnect = true;

					// copy the dest group into the source partition if it is not there yet
					// FIXME: fail to write external group if the only external link across GPUs is uni-directional (GPU0 -> GPU1, no GPU1 -> GPU0)
					targetGroup.gGrpId = connIt->second.grpDest;
					destGrpIt = find(groupPartitionLists[srcNetId].begin(), groupPartitionLists[srcNetId].end(), targetGroup);
					if (destGrpIt == groupPartitionLists[srcNetId].end()) { // the "external" dest group has not yet been copied to the "local" group partition list
						numAssignedNeurons[srcNetId] += groupConfigMap[connIt->second.grpDest].numN;
						groupPartitionLists[srcNetId].push_back(groupConfigMDMap[connIt->second.grpDest]);
					}

					// likewise, copy the source group into the dest partition if it is not there yet
					targetGroup.gGrpId = connIt->second.grpSrc;
					srcGrpIt = find(groupPartitionLists[destNetId].begin(), groupPartitionLists[destNetId].end(), targetGroup);
					if (srcGrpIt == groupPartitionLists[destNetId].end()) {
						numAssignedNeurons[destNetId] += groupConfigMap[connIt->second.grpSrc].numN;
						groupPartitionLists[destNetId].push_back(groupConfigMDMap[connIt->second.grpSrc]);
					}

					externalConnectLists[srcNetId].push_back(connectConfigMap[connIt->second.connId]); // Copy by value

					// build the spike routing table along the way
					//printf("%d,%d -> %d,%d\n", srcNetId, connIt->second.grpSrc, destNetId, connIt->second.grpDest);
					RoutingTableEntry rte(srcNetId, destNetId);
					spikeRoutingTable.push_back(rte);
				}
			}
		}
	}

	// drop consecutive duplicate routing entries
	spikeRoutingTable.unique();

	// assign local neuron ids and local group ids for each local network in the order
	// IMPORTANT: NEURON ORGANIZATION/ARRANGEMENT MAP
	// <--- Excitatory --> | <-------- Inhibitory REGION ----------> | <-- Excitatory --> | <-- External -->
	// Excitatory-Regular | Inhibitory-Regular | Inhibitory-Poisson | Excitatory-Poisson | External Neurons
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			int availableNeuronId = 0;
			int localGroupId = 0;
			// five ordered passes, one per neuron category above
			for (int order = 0; order < 5; order++) {
				for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++) {
					unsigned int type = groupConfigMap[grpIt->gGrpId].type;
					if (IS_EXCITATORY_TYPE(type) && (type & POISSON_NEURON) && order == 3 && grpIt->netId == netId) {
						availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
						localGroupId++;
					} else if (IS_INHIBITORY_TYPE(type) && (type & POISSON_NEURON) && order == 2 && grpIt->netId == netId) {
						availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
						localGroupId++;
					} else if (IS_EXCITATORY_TYPE(type) && !(type & POISSON_NEURON) && order == 0 && grpIt->netId == netId) {
						availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
						localGroupId++;
					} else if (IS_INHIBITORY_TYPE(type) && !(type & POISSON_NEURON) && order == 1 && grpIt->netId == netId) {
						availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
						localGroupId++;
					} else if (order == 4 && grpIt->netId != netId) {
						// external (border) groups copied from other partitions come last
						availableNeuronId = assignGroup(grpIt, localGroupId, availableNeuronId);
						localGroupId++;
					}
				}
			}
			assert(availableNeuronId == numAssignedNeurons[netId]);
			assert(localGroupId == groupPartitionLists[netId].size());
		}
	}


	// generate connections among groups according to group and connect configs
	// update ConnectConfig::numberOfConnections
	// update GroupConfig::numPostSynapses, GroupConfig::numPreSynapses
	if (loadSimFID == NULL) {
		connectNetwork();
	} else {
		KERNEL_INFO("Load Simulation");
		loadSimulation_internal(false); // true or false doesn't matter here
	}

	collectGlobalNetworkConfigP();

	// print group and connection overview
	for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
		if (!groupPartitionLists[netId].empty()) {
			KERNEL_INFO("\n+ Local Network (%d)", netId);
			KERNEL_INFO("|-+ Group List:");
			for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[netId].begin(); grpIt != groupPartitionLists[netId].end(); grpIt++)
				printGroupInfo(netId, grpIt);
		}

		if (!localConnectLists[netId].empty() || !externalConnectLists[netId].empty()) {
			KERNEL_INFO("|-+ Connection List:");
			for (std::list<ConnectConfig>::iterator connIt = localConnectLists[netId].begin(); connIt != localConnectLists[netId].end(); connIt++)
				printConnectionInfo(netId, connIt);

			for (std::list<ConnectConfig>::iterator connIt = externalConnectLists[netId].begin(); connIt != externalConnectLists[netId].end(); connIt++)
				printConnectionInfo(netId, connIt);
		}
	}

	// print spike routing table
	printSikeRoutingInfo();

	snnState = PARTITIONED_SNN;
}
5274 
5275 int SNN::loadSimulation_internal(bool onlyPlastic) {
5276  // TSC: so that we can restore the file position later...
5277  // MB: not sure why though...
5278  long file_position = ftell(loadSimFID);
5279 
5280  int tmpInt;
5281  float tmpFloat;
5282 
5283  bool readErr = false; // keep track of reading errors
5284  size_t result;
5285 
5286 
5287  // ------- read header ----------------
5288 
5289  fseek(loadSimFID, 0, SEEK_SET);
5290 
5291  // read file signature
5292  result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
5293  readErr |= (result!=1);
5294  if (tmpInt != 294338571) {
5295  KERNEL_ERROR("loadSimulation: Unknown file signature. This does not seem to be a "
5296  "simulation file created with CARLsim::saveSimulation.");
5297  exitSimulation(-1);
5298  }
5299 
5300  // read file version number
5301  result = fread(&tmpFloat, sizeof(float), 1, loadSimFID);
5302  readErr |= (result!=1);
5303  if (tmpFloat > 0.3f) {
5304  KERNEL_ERROR("loadSimulation: Unsupported version number (%f)",tmpFloat);
5305  exitSimulation(-1);
5306  }
5307 
5308  // read simulation time
5309  result = fread(&tmpFloat, sizeof(float), 1, loadSimFID);
5310  readErr |= (result!=1);
5311 
5312  // read execution time
5313  result = fread(&tmpFloat, sizeof(float), 1, loadSimFID);
5314  readErr |= (result!=1);
5315 
5316  // read number of neurons
5317  result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
5318  readErr |= (result!=1);
5319  if (tmpInt != glbNetworkConfig.numN) {
5320  KERNEL_ERROR("loadSimulation: Number of neurons in file (%d) and simulation (%d) don't match.",
5321  tmpInt, glbNetworkConfig.numN);
5322  exitSimulation(-1);
5323  }
5324 
5325  // skip save and read pre-synapses & post-synapses in CARLsim4 since they are now netID based
5326  // read number of pre-synapses
5327  // result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
5328  // readErr |= (result!=1);
5329  // if (numPreSynNet != tmpInt) {
5330  // KERNEL_ERROR("loadSimulation: numPreSynNet in file (%d) and simulation (%d) don't match.",
5331  // tmpInt, numPreSynNet);
5332  // exitSimulation(-1);
5333  // }
5334 
5336  //result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
5337  //readErr |= (result!=1);
5338  //if (numPostSynNet != tmpInt) {
5339  // KERNEL_ERROR("loadSimulation: numPostSynNet in file (%d) and simulation (%d) don't match.",
5340  // tmpInt, numPostSynNet);
5341  // exitSimulation(-1);
5342  //}
5343 
5344  // read number of groups
5345  result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
5346  readErr |= (result!=1);
5347  if (tmpInt != numGroups) {
5348  KERNEL_ERROR("loadSimulation: Number of groups in file (%d) and simulation (%d) don't match.",
5349  tmpInt, numGroups);
5350  exitSimulation(-1);
5351  }
5352 
5353  // throw reading error instead of proceeding
5354  if (readErr) {
5355  fprintf(stderr,"loadSimulation: Error while reading file header");
5356  exitSimulation(-1);
5357  }
5358 
5359 
5360  // ------- read group information ----------------
5361  for (int g=0; g<numGroups; g++) {
5362  // read StartN
5363  result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
5364  readErr |= (result!=1);
5365  if (tmpInt != groupConfigMDMap[g].gStartN) {
5366  KERNEL_ERROR("loadSimulation: StartN in file (%d) and grpInfo (%d) for group %d don't match.",
5367  tmpInt, groupConfigMDMap[g].gStartN, g);
5368  exitSimulation(-1);
5369  }
5370 
5371  // read EndN
5372  result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
5373  readErr |= (result!=1);
5374  if (tmpInt != groupConfigMDMap[g].gEndN) {
5375  KERNEL_ERROR("loadSimulation: EndN in file (%d) and grpInfo (%d) for group %d don't match.",
5376  tmpInt, groupConfigMDMap[g].gEndN, g);
5377  exitSimulation(-1);
5378  }
5379 
5380  // read SizeX
5381  result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
5382  readErr |= (result!=1);
5383  if (tmpInt != groupConfigMap[g].grid.numX) {
5384  KERNEL_ERROR("loadSimulation: numX in file (%d) and grpInfo (%d) for group %d don't match.",
5385  tmpInt, groupConfigMap[g].grid.numX, g);
5386  exitSimulation(-1);
5387  }
5388 
5389 
5390  // read SizeY
5391  result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
5392  readErr |= (result!=1);
5393  if (tmpInt != groupConfigMap[g].grid.numY) {
5394  KERNEL_ERROR("loadSimulation: numY in file (%d) and grpInfo (%d) for group %d don't match.",
5395  tmpInt, groupConfigMap[g].grid.numY, g);
5396  exitSimulation(-1);
5397  }
5398 
5399 
5400  // read SizeZ
5401  result = fread(&tmpInt, sizeof(int), 1, loadSimFID);
5402  readErr |= (result!=1);
5403  if (tmpInt != groupConfigMap[g].grid.numZ) {
5404  KERNEL_ERROR("loadSimulation: numZ in file (%d) and grpInfo (%d) for group %d don't match.",
5405  tmpInt, groupConfigMap[g].grid.numZ, g);
5406  exitSimulation(-1);
5407  }
5408 
5409 
5410  // read group name
5411  char name[100];
5412  result = fread(name, sizeof(char), 100, loadSimFID);
5413  readErr |= (result!=100);
5414  if (strcmp(name,groupConfigMap[g].grpName.c_str()) != 0) {
5415  KERNEL_ERROR("loadSimulation: Group names in file (%s) and grpInfo (%s) don't match.", name,
5416  groupConfigMap[g].grpName.c_str());
5417  exitSimulation(-1);
5418  }
5419  }
5420 
5421  if (readErr) {
5422  KERNEL_ERROR("loadSimulation: Error while reading group info");
5423  exitSimulation(-1);
5424  }
5425  // // read weight
5426  // result = fread(&weight, sizeof(float), 1, loadSimFID);
5427  // readErr |= (result!=1);
5428 
5429  // short int gIDpre = managerRuntimeData.grpIds[nIDpre];
5430  // if (IS_INHIBITORY_TYPE(groupConfigs[0][gIDpre].Type) && (weight>0)
5431  // || !IS_INHIBITORY_TYPE(groupConfigs[0][gIDpre].Type) && (weight<0)) {
5432  // KERNEL_ERROR("loadSimulation: Sign of weight value (%s) does not match neuron type (%s)",
5433  // ((weight>=0.0f)?"plus":"minus"),
5434  // (IS_INHIBITORY_TYPE(groupConfigs[0][gIDpre].Type)?"inhibitory":"excitatory"));
5435  // exitSimulation(-1);
5436  // }
5437 
5438  // // read max weight
5439  // result = fread(&maxWeight, sizeof(float), 1, loadSimFID);
5440  // readErr |= (result!=1);
5441  // if (IS_INHIBITORY_TYPE(groupConfigs[0][gIDpre].Type) && (maxWeight>=0)
5442  // || !IS_INHIBITORY_TYPE(groupConfigs[0][gIDpre].Type) && (maxWeight<=0)) {
5443  // KERNEL_ERROR("loadSimulation: Sign of maxWeight value (%s) does not match neuron type (%s)",
5444  // ((maxWeight>=0.0f)?"plus":"minus"),
5445  // (IS_INHIBITORY_TYPE(groupConfigs[0][gIDpre].Type)?"inhibitory":"excitatory"));
5446  // exitSimulation(-1);
5447  // }
5448 
5449  // ------- read synapse information ----------------
5450  int net_count = 0;
5451  result = fread(&net_count, sizeof(int), 1, loadSimFID);
5452  readErr |= (result!=1);
5453 
5454  for (int i = 0; i < net_count; i++) {
5455  int synapse_count = 0;
5456  result = fread(&synapse_count, sizeof(int), 1, loadSimFID);
5457  for (int j = 0; j < synapse_count; j++) {
5458  int gGrpIdPre;
5459  int gGrpIdPost;
5460  int grpNIdPre;
5461  int grpNIdPost;
5462  int connId;
5463  float weight;
5464  float maxWeight;
5465  int delay;
5466 
5467  // read gGrpIdPre
5468  result = fread(&gGrpIdPre, sizeof(int), 1, loadSimFID);
5469  readErr != (result!=1);
5470 
5471  // read gGrpIdPost
5472  result = fread(&gGrpIdPost, sizeof(int), 1, loadSimFID);
5473  readErr != (result!=1);
5474 
5475  // read grpNIdPre
5476  result = fread(&grpNIdPre, sizeof(int), 1, loadSimFID);
5477  readErr != (result!=1);
5478 
5479  // read grpNIdPost
5480  result = fread(&grpNIdPost, sizeof(int), 1, loadSimFID);
5481  readErr != (result!=1);
5482 
5483  // read connId
5484  result = fread(&connId, sizeof(int), 1, loadSimFID);
5485  readErr != (result!=1);
5486 
5487  // read weight
5488  result = fread(&weight, sizeof(float), 1, loadSimFID);
5489  readErr != (result!=1);
5490 
5491  // read maxWeight
5492  result = fread(&maxWeight, sizeof(float), 1, loadSimFID);
5493  readErr != (result!=1);
5494 
5495  // read delay
5496  result = fread(&delay, sizeof(int), 1, loadSimFID);
5497  readErr != (result!=1);
5498 
5499  // check connection
5500  if (connectConfigMap[connId].grpSrc != gGrpIdPre) {
5501  KERNEL_ERROR("loadSimulation: source group in file (%d) and in simulation (%d) for connection %d don't match.",
5502  gGrpIdPre , connectConfigMap[connId].grpSrc, connId);
5503  exitSimulation(-1);
5504  }
5505 
5506  if (connectConfigMap[connId].grpDest != gGrpIdPost) {
5507  KERNEL_ERROR("loadSimulation: dest group in file (%d) and in simulation (%d) for connection %d don't match.",
5508  gGrpIdPost , connectConfigMap[connId].grpDest, connId);
5509  exitSimulation(-1);
5510  }
5511 
5512  // connect synapse
5513  // find netid for two groups
5514  int netIdPre = groupConfigMDMap[gGrpIdPre].netId;
5515  int netIdPost = groupConfigMDMap[gGrpIdPost].netId;
5516  bool isExternal = (netIdPre != netIdPost);
5517 
5518  // find global neuron id for two neurons
5519  int globalNIdPre = groupConfigMDMap[gGrpIdPre].gStartN + grpNIdPre;
5520  int globalNIdPost = groupConfigMDMap[gGrpIdPost].gStartN + grpNIdPost;
5521 
5522  bool connected =false;
5523  if (!isExternal) {
5524  for (std::list<ConnectConfig>::iterator connIt = localConnectLists[netIdPre].begin(); connIt != localConnectLists[netIdPre].end() && (!connected); connIt++) {
5525  if (connIt->connId == connId) {
5526  // connect two neurons
5527  connectNeurons(netIdPre, gGrpIdPre, gGrpIdPost, globalNIdPre, globalNIdPost, connId, weight, maxWeight, delay, -1);
5528  connected = true;
5529  // update connection information
5530  connIt->numberOfConnections++;
5531  std::list<GroupConfigMD>::iterator grpIt;
5532 
5533  // fix me maybe: numPostSynapses and numPreSynpases could also be loaded from saved information directly to save time
5534  // the current implementation is a safer one
5535  GroupConfigMD targetGrp;
5536 
5537  targetGrp.gGrpId = gGrpIdPre;
5538  grpIt = std::find(groupPartitionLists[netIdPre].begin(), groupPartitionLists[netIdPre].end(), targetGrp);
5539  assert(grpIt != groupPartitionLists[netIdPre].end());
5540  grpIt->numPostSynapses += 1;
5541 
5542  targetGrp.gGrpId = gGrpIdPost;
5543  grpIt = std::find(groupPartitionLists[netIdPre].begin(), groupPartitionLists[netIdPre].end(), targetGrp);
5544  assert(grpIt != groupPartitionLists[netIdPost].end());
5545  grpIt->numPreSynapses += 1;
5546  }
5547  }
5548  } else {
5549  for (std::list<ConnectConfig>::iterator connIt = externalConnectLists[netIdPre].begin(); connIt != externalConnectLists[netIdPre].end() && (!connected); connIt++) {
5550  if (connIt->connId == connId) {
5551  // connect two neurons
5552  connectNeurons(netIdPre, gGrpIdPre, gGrpIdPost, globalNIdPre, globalNIdPost, connId, weight, maxWeight, delay, netIdPost);
5553  connected = true;
5554  // update connection information
5555  connIt->numberOfConnections++;
5556 
5557  // fix me maybe: numPostSynapses and numPreSynpases could also be loaded from saved information directly to save time
5558  // the current implementation is a safer one
5559  GroupConfigMD targetGrp;
5560  std::list<GroupConfigMD>::iterator grpIt;
5561 
5562  targetGrp.gGrpId = gGrpIdPre;
5563  grpIt = std::find(groupPartitionLists[netIdPre].begin(), groupPartitionLists[netIdPre].end(), targetGrp);
5564  assert(grpIt != groupPartitionLists[netIdPre].end());
5565  grpIt->numPostSynapses += 1;
5566 
5567  targetGrp.gGrpId = gGrpIdPost;
5568  grpIt = std::find(groupPartitionLists[netIdPre].begin(), groupPartitionLists[netIdPre].end(), targetGrp);
5569  assert(grpIt != groupPartitionLists[netIdPost].end());
5570  grpIt->numPreSynapses += 1;
5571 
5572  // update group information in another network
5573  targetGrp.gGrpId = gGrpIdPre;
5574  grpIt = std::find(groupPartitionLists[netIdPost].begin(), groupPartitionLists[netIdPost].end(), targetGrp);
5575  assert(grpIt != groupPartitionLists[netIdPost].end());
5576  grpIt->numPostSynapses += 1;
5577 
5578  targetGrp.gGrpId = gGrpIdPost;
5579  grpIt = std::find(groupPartitionLists[netIdPost].begin(), groupPartitionLists[netIdPost].end(), targetGrp);
5580  assert(grpIt != groupPartitionLists[netIdPost].end());
5581  grpIt->numPreSynapses += 1;
5582  }
5583  }
5584  }
5585  }
5586  }
5587 
5588  fseek(loadSimFID,file_position,SEEK_SET);
5589 
5590  return 0;
5591 }
5592 
5593 void SNN::generateRuntimeSNN() {
5594  // 1. genearte configurations for the simulation
5595  // generate (copy) group configs from groupPartitionLists[]
5596  generateRuntimeGroupConfigs();
5597 
5598  // generate (copy) connection configs from localConnectLists[] and exeternalConnectLists[]
5599  generateRuntimeConnectConfigs();
5600 
5601  // generate local network configs and accquire maximum size of rumtime data
5602  generateRuntimeNetworkConfigs();
5603 
5604  // 2. allocate space of runtime data used by the manager
5605  // - allocate firingTableD1, firingTableD2, timeTableD1, timeTableD2
5606  // - reset firingTableD1, firingTableD2, timeTableD1, timeTableD2
5607  allocateManagerSpikeTables();
5608  // - allocate voltage, recovery, Izh_a, Izh_b, Izh_c, Izh_d, current, extCurrent, gAMPA, gNMDA, gGABAa, gGABAb
5609  // lastSpikeTime, nSpikeCnt, stpu, stpx, Npre, Npre_plastic, Npost, cumulativePost, cumulativePre,
5610  // postSynapticIds, postDelayInfo, wt, wtChange, synSpikeTime, maxSynWt, preSynapticIds, grpIds, connIdsPreIdx,
5611  // grpDA, grp5HT, grpACh, grpNE, grpDABuffer, grp5HTBuffer, grpAChBuffer, grpNEBuffer, mulSynFast, mulSynSlow
5612  // - reset all above
5613  allocateManagerRuntimeData();
5614 
5615  // 3. initialize manager runtime data according to partitions (i.e., local networks)
5616  // 4a. allocate appropriate memory space (e.g., main memory (CPU) or device memory (GPU)).
5617  // 4b. load (copy) them to appropriate memory space for execution
5618  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
5619  if (!groupPartitionLists[netId].empty()) {
5620  KERNEL_INFO("");
5621  if (netId < CPU_RUNTIME_BASE) {
5622  KERNEL_INFO("***************** Initializing GPU %d Runtime *************************", netId);
5623  } else {
5624  KERNEL_INFO("***************** Initializing CPU %d Runtime *************************", (netId - CPU_RUNTIME_BASE));
5625  }
5626  // build the runtime data according to local network, group, connection configuirations
5627 
5628  // generate runtime data for each group
5629  for(int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
5630  // local poisson neurons
5631  if (groupConfigs[netId][lGrpId].netId == netId && (groupConfigs[netId][lGrpId].Type & POISSON_NEURON)) {
5632  // - init lstSpikeTime
5633  // - reset avgFiring, stpu, stpx
5634  // - init stpx
5635  generatePoissonGroupRuntime(netId, lGrpId);
5636  }
5637  // local regular neurons
5638  if (groupConfigs[netId][lGrpId].netId == netId && !(groupConfigs[netId][lGrpId].Type & POISSON_NEURON)) {
5639  // - init grpDA, grp5HT, grpACh, grpNE
5640  // - init Izh_a, Izh_b, Izh_c, Izh_d, voltage, recovery, stpu, stpx
5641  // - init baseFiring, avgFiring
5642  // - init lastSpikeTime
5643  generateGroupRuntime(netId, lGrpId);
5644  }
5645  }
5646 
5647  // - init grpIds
5648  for (int lNId = 0; lNId < networkConfigs[netId].numNAssigned; lNId++) {
5649  managerRuntimeData.grpIds[lNId] = -1;
5650  for(int lGrpId = 0; lGrpId < networkConfigs[netId].numGroupsAssigned; lGrpId++) {
5651  if (lNId >= groupConfigs[netId][lGrpId].lStartN && lNId <= groupConfigs[netId][lGrpId].lEndN) {
5652  managerRuntimeData.grpIds[lNId] = (short int)lGrpId;
5653  break;
5654  }
5655  }
5656  assert(managerRuntimeData.grpIds[lNId] != -1);
5657  }
5658 
5659  // - init mulSynFast, mulSynSlow
5660  // - init Npre, Npre_plastic, Npost, cumulativePre, cumulativePost, preSynapticIds, postSynapticIds, postDelayInfo
5661  // - init wt, maxSynWt
5662  generateConnectionRuntime(netId);
5663 
5664  generateCompConnectionRuntime(netId);
5665 
5666  // - reset current
5667  resetCurrent(netId);
5668  // - reset conductance
5669  resetConductances(netId);
5670 
5671  // - reset wtChange
5672  // - init synSpikeTime
5673  resetSynapse(netId, false);
5674 
5675  allocateSNN(netId);
5676  }
5677  }
5678 
5679  // count allocated CPU/GPU runtime
5680  numGPUs = 0; numCores = 0;
5681  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
5682  if (netId < CPU_RUNTIME_BASE && runtimeData[netId].allocated)
5683  numGPUs++;
5684  if (netId >= CPU_RUNTIME_BASE && runtimeData[netId].allocated)
5685  numCores++;
5686  }
5687 
 5688  // 5. declare the spiking neural network is executable
5689  snnState = EXECUTABLE_SNN;
5690 }
5691 
5692 void SNN::resetConductances(int netId) {
5693  if (networkConfigs[netId].sim_with_conductances) {
5694  memset(managerRuntimeData.gAMPA, 0, sizeof(float) * networkConfigs[netId].numNReg);
5695  if (networkConfigs[netId].sim_with_NMDA_rise) {
5696  memset(managerRuntimeData.gNMDA_r, 0, sizeof(float) * networkConfigs[netId].numNReg);
5697  memset(managerRuntimeData.gNMDA_d, 0, sizeof(float) * networkConfigs[netId].numNReg);
5698  } else {
5699  memset(managerRuntimeData.gNMDA, 0, sizeof(float) * networkConfigs[netId].numNReg);
5700  }
5701  memset(managerRuntimeData.gGABAa, 0, sizeof(float) * networkConfigs[netId].numNReg);
5702  if (networkConfigs[netId].sim_with_GABAb_rise) {
5703  memset(managerRuntimeData.gGABAb_r, 0, sizeof(float) * networkConfigs[netId].numNReg);
5704  memset(managerRuntimeData.gGABAb_d, 0, sizeof(float) * networkConfigs[netId].numNReg);
5705  } else {
5706  memset(managerRuntimeData.gGABAb, 0, sizeof(float) * networkConfigs[netId].numNReg);
5707  }
5708  }
5709 }
5710 
5711 void SNN::resetCurrent(int netId) {
5712  assert(managerRuntimeData.current != NULL);
5713  memset(managerRuntimeData.current, 0, sizeof(float) * networkConfigs[netId].numNReg);
5714 }
5715 
5716 // FIXME: unused function
5717 void SNN::resetFiringInformation() {
5718  // Reset firing tables and time tables to default values..
5719 
5720  // reset various times...
5721  simTimeMs = 0;
5722  simTimeSec = 0;
5723  simTime = 0;
5724 
5725  // reset the propogation Buffer.
5726  resetPropogationBuffer();
5727  // reset Timing Table..
5728  resetTimeTable();
5729 }
5730 
5731 void SNN::resetTiming() {
5732  prevExecutionTime = cumExecutionTime;
5733  executionTime = 0.0f;
5734 }
5735 
5736 void SNN::resetNeuromodulator(int netId, int lGrpId) {
5737  managerRuntimeData.grpDA[lGrpId] = groupConfigs[netId][lGrpId].baseDP;
5738  managerRuntimeData.grp5HT[lGrpId] = groupConfigs[netId][lGrpId].base5HT;
5739  managerRuntimeData.grpACh[lGrpId] = groupConfigs[netId][lGrpId].baseACh;
5740  managerRuntimeData.grpNE[lGrpId] = groupConfigs[netId][lGrpId].baseNE;
5741 }
5742 
// Initializes one regular (non-Poisson) neuron of group lGrpId on partition
// netId to its resting state in the manager runtime buffers:
// - samples Izhikevich (4- and 9-parameter) coefficients as mean + sd*U(0,1)
// - copies LIF parameters and derives the per-neuron LIF gain/bias
// - sets voltage/recovery to the model-appropriate resting values
// - seeds homeostasis (baseFiring/avgFiring) and STP state where enabled
// NOTE: the drand48() calls below are order-dependent; reordering them
// changes the random parameter stream and thus reproducibility.
void SNN::resetNeuron(int netId, int lGrpId, int lNId) {
	int gGrpId = groupConfigs[netId][lGrpId].gGrpId; // get global group id
	assert(lNId < networkConfigs[netId].numNReg);

	// Izh_a == -1 marks an Izhikevich group whose parameters were never set
	if (groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a == -1 && groupConfigMap[gGrpId].isLIF == 0) {
		KERNEL_ERROR("setNeuronParameters must be called for group %s (G:%d,L:%d)",groupConfigMap[gGrpId].grpName.c_str(), gGrpId, lGrpId);
		exitSimulation(1);
	}

	// lif_tau_m == -1 marks a LIF group whose parameters were never set
	if (groupConfigMap[gGrpId].neuralDynamicsConfig.lif_tau_m == -1 && groupConfigMap[gGrpId].isLIF == 1) {
		KERNEL_ERROR("setNeuronParametersLIF must be called for group %s (G:%d,L:%d)",groupConfigMap[gGrpId].grpName.c_str(), gGrpId, lGrpId);
		exitSimulation(1);
	}

	// per-neuron Izhikevich parameters, jittered by the configured standard deviation
	managerRuntimeData.Izh_a[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_a_sd * (float)drand48();
	managerRuntimeData.Izh_b[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_b_sd * (float)drand48();
	managerRuntimeData.Izh_c[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_c_sd * (float)drand48();
	managerRuntimeData.Izh_d[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_d_sd * (float)drand48();
	managerRuntimeData.Izh_C[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_C + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_C_sd * (float)drand48();
	managerRuntimeData.Izh_k[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_k + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_k_sd * (float)drand48();
	managerRuntimeData.Izh_vr[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vr + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vr_sd * (float)drand48();
	managerRuntimeData.Izh_vt[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vt + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vt_sd * (float)drand48();
	managerRuntimeData.Izh_vpeak[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vpeak + groupConfigMap[gGrpId].neuralDynamicsConfig.Izh_vpeak_sd * (float)drand48();
	// LIF parameters are copied verbatim (no jitter); written for all groups,
	// but only meaningful when the group is configured as LIF
	managerRuntimeData.lif_tau_m[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.lif_tau_m;
	managerRuntimeData.lif_tau_ref[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.lif_tau_ref;
	managerRuntimeData.lif_tau_ref_c[lNId] = 0; // refractory countdown starts expired
	managerRuntimeData.lif_vTh[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.lif_vTh;
	managerRuntimeData.lif_vReset[lNId] = groupConfigMap[gGrpId].neuralDynamicsConfig.lif_vReset;

	// calculate gain and bias for the lif neuron
	if (groupConfigs[netId][lGrpId].isLIF){
		// gain and bias of the LIF neuron are derived from a membrane resistance
		// drawn uniformly in [lif_minRmem, lif_maxRmem]
		float rmRange = (float)(groupConfigMap[gGrpId].neuralDynamicsConfig.lif_maxRmem - groupConfigMap[gGrpId].neuralDynamicsConfig.lif_minRmem);
		float minRmem = (float)groupConfigMap[gGrpId].neuralDynamicsConfig.lif_minRmem;
		managerRuntimeData.lif_bias[lNId] = 0.0f;
		managerRuntimeData.lif_gain[lNId] = minRmem + rmRange * (float)drand48();
	}

	// resting voltage: vReset (LIF), vr (9-param Izhikevich), or c (4-param Izhikevich)
	managerRuntimeData.nextVoltage[lNId] = managerRuntimeData.voltage[lNId] = groupConfigs[netId][lGrpId].isLIF ? managerRuntimeData.lif_vReset[lNId] : (groupConfigs[netId][lGrpId].withParamModel_9 ? managerRuntimeData.Izh_vr[lNId] : managerRuntimeData.Izh_c[lNId]);
	managerRuntimeData.recovery[lNId] = groupConfigs[netId][lGrpId].withParamModel_9 ? 0.0f : managerRuntimeData.Izh_b[lNId] * managerRuntimeData.voltage[lNId];

	if (groupConfigs[netId][lGrpId].WithHomeostasis) {
		// set the baseFiring with some standard deviation.
		if (drand48() > 0.5) {
			managerRuntimeData.baseFiring[lNId] = groupConfigMap[gGrpId].homeoConfig.baseFiring + groupConfigMap[gGrpId].homeoConfig.baseFiringSD * -log(drand48());
		} else {
			managerRuntimeData.baseFiring[lNId] = groupConfigMap[gGrpId].homeoConfig.baseFiring - groupConfigMap[gGrpId].homeoConfig.baseFiringSD * -log(drand48());
			// NOTE(review): the 0.1f floor is applied only on the minus branch;
			// looks intentional (the plus branch cannot go below base) — confirm
			if(managerRuntimeData.baseFiring[lNId] < 0.1f) managerRuntimeData.baseFiring[lNId] = 0.1f;
		}

		if (groupConfigMap[gGrpId].homeoConfig.baseFiring != 0.0f) {
			managerRuntimeData.avgFiring[lNId] = managerRuntimeData.baseFiring[lNId];
		} else {
			// a zero configured base firing disables the target: clear both values
			managerRuntimeData.baseFiring[lNId] = 0.0f;
			managerRuntimeData.avgFiring[lNId] = 0.0f;
		}
	}

	// sentinel: this neuron has not spiked yet
	managerRuntimeData.lastSpikeTime[lNId] = MAX_SIMULATION_TIME;

	if(groupConfigs[netId][lGrpId].WithSTP) {
		// reset short-term plasticity state for every delay slot of this neuron
		for (int j = 0; j < networkConfigs[netId].maxDelay + 1; j++) { // is of size maxDelay_+1
			int index = STP_BUF_POS(lNId, j, networkConfigs[netId].maxDelay);
			managerRuntimeData.stpu[index] = 0.0f;
			managerRuntimeData.stpx[index] = 1.0f;
		}
	}
}
5814 
5815 void SNN::resetMonitors(bool deallocate) {
5816  // order is important! monitor objects might point to SNN or CARLsim,
5817  // need to deallocate them first
5818 
5819 
5820  // -------------- DEALLOCATE MONITOR OBJECTS ---------------------- //
5821 
5822  // delete all SpikeMonitor objects
5823  // don't kill SpikeMonitorCore objects, they will get killed automatically
5824  for (int i=0; i<numSpikeMonitor; i++) {
5825  if (spikeMonList[i]!=NULL && deallocate) delete spikeMonList[i];
5826  spikeMonList[i]=NULL;
5827  }
5828 
5829  // delete all NeuronMonitor objects
5830  // don't kill NeuronMonitorCore objects, they will get killed automatically
5831  for (int i = 0; i<numNeuronMonitor; i++) {
5832  if (neuronMonList[i] != NULL && deallocate) delete neuronMonList[i];
5833  neuronMonList[i] = NULL;
5834  }
5835 
5836  // delete all GroupMonitor objects
5837  // don't kill GroupMonitorCore objects, they will get killed automatically
5838  for (int i=0; i<numGroupMonitor; i++) {
5839  if (groupMonList[i]!=NULL && deallocate) delete groupMonList[i];
5840  groupMonList[i]=NULL;
5841  }
5842 
5843  // delete all ConnectionMonitor objects
5844  // don't kill ConnectionMonitorCore objects, they will get killed automatically
5845  for (int i=0; i<numConnectionMonitor; i++) {
5846  if (connMonList[i]!=NULL && deallocate) delete connMonList[i];
5847  connMonList[i]=NULL;
5848  }
5849 }
5850 
5851 void SNN::resetGroupConfigs(bool deallocate) {
5852  // clear all existing group configurations
5853  if (deallocate) groupConfigMap.clear();
5854 }
5855 
5856 void SNN::resetConnectionConfigs(bool deallocate) {
5857  // clear all existing connection configurations
5858  if (deallocate) connectConfigMap.clear();
5859 }
5860 
// Frees every heap buffer owned by the manager runtime (the host-side staging
// copy of the network state) and NULLs each pointer so a subsequent call is a
// safe no-op. Each buffer appears exactly twice below: once in a guarded
// delete, once in a NULL assignment — keep the pairs in sync when editing.
void SNN::deleteManagerRuntimeData() {
	// spike buffer and spike-generator bitmap
	if (spikeBuf!=NULL) delete spikeBuf;
	if (managerRuntimeData.spikeGenBits!=NULL) delete[] managerRuntimeData.spikeGenBits;
	spikeBuf=NULL; managerRuntimeData.spikeGenBits=NULL;

	// clear data (i.e., concentration of neuromodulator) of groups
	if (managerRuntimeData.grpDA != NULL) delete [] managerRuntimeData.grpDA;
	if (managerRuntimeData.grp5HT != NULL) delete [] managerRuntimeData.grp5HT;
	if (managerRuntimeData.grpACh != NULL) delete [] managerRuntimeData.grpACh;
	if (managerRuntimeData.grpNE != NULL) delete [] managerRuntimeData.grpNE;
	managerRuntimeData.grpDA = NULL;
	managerRuntimeData.grp5HT = NULL;
	managerRuntimeData.grpACh = NULL;
	managerRuntimeData.grpNE = NULL;

	// clear assistive data buffer for group monitor
	if (managerRuntimeData.grpDABuffer != NULL) delete [] managerRuntimeData.grpDABuffer;
	if (managerRuntimeData.grp5HTBuffer != NULL) delete [] managerRuntimeData.grp5HTBuffer;
	if (managerRuntimeData.grpAChBuffer != NULL) delete [] managerRuntimeData.grpAChBuffer;
	if (managerRuntimeData.grpNEBuffer != NULL) delete [] managerRuntimeData.grpNEBuffer;
	managerRuntimeData.grpDABuffer = NULL; managerRuntimeData.grp5HTBuffer = NULL;
	managerRuntimeData.grpAChBuffer = NULL; managerRuntimeData.grpNEBuffer = NULL;

	// -------------- DEALLOCATE CORE OBJECTS ---------------------- //

	// per-neuron state: membrane voltage, recovery, currents, monitor buffers
	if (managerRuntimeData.voltage!=NULL) delete[] managerRuntimeData.voltage;
	if (managerRuntimeData.nextVoltage != NULL) delete[] managerRuntimeData.nextVoltage;
	if (managerRuntimeData.recovery!=NULL) delete[] managerRuntimeData.recovery;
	if (managerRuntimeData.current!=NULL) delete[] managerRuntimeData.current;
	if (managerRuntimeData.extCurrent!=NULL) delete[] managerRuntimeData.extCurrent;
	if (managerRuntimeData.totalCurrent != NULL) delete[] managerRuntimeData.totalCurrent;
	if (managerRuntimeData.curSpike != NULL) delete[] managerRuntimeData.curSpike;
	if (managerRuntimeData.nVBuffer != NULL) delete[] managerRuntimeData.nVBuffer;
	if (managerRuntimeData.nUBuffer != NULL) delete[] managerRuntimeData.nUBuffer;
	if (managerRuntimeData.nIBuffer != NULL) delete[] managerRuntimeData.nIBuffer;
	managerRuntimeData.voltage=NULL; managerRuntimeData.recovery=NULL; managerRuntimeData.current=NULL; managerRuntimeData.extCurrent=NULL;
	managerRuntimeData.nextVoltage = NULL; managerRuntimeData.totalCurrent = NULL; managerRuntimeData.curSpike = NULL;
	managerRuntimeData.nVBuffer = NULL; managerRuntimeData.nUBuffer = NULL; managerRuntimeData.nIBuffer = NULL;

	// per-neuron Izhikevich model parameters (4- and 9-parameter variants)
	if (managerRuntimeData.Izh_a!=NULL) delete[] managerRuntimeData.Izh_a;
	if (managerRuntimeData.Izh_b!=NULL) delete[] managerRuntimeData.Izh_b;
	if (managerRuntimeData.Izh_c!=NULL) delete[] managerRuntimeData.Izh_c;
	if (managerRuntimeData.Izh_d!=NULL) delete[] managerRuntimeData.Izh_d;
	if (managerRuntimeData.Izh_C!=NULL) delete[] managerRuntimeData.Izh_C;
	if (managerRuntimeData.Izh_k!=NULL) delete[] managerRuntimeData.Izh_k;
	if (managerRuntimeData.Izh_vr!=NULL) delete[] managerRuntimeData.Izh_vr;
	if (managerRuntimeData.Izh_vt!=NULL) delete[] managerRuntimeData.Izh_vt;
	if (managerRuntimeData.Izh_vpeak!=NULL) delete[] managerRuntimeData.Izh_vpeak;
	managerRuntimeData.Izh_a=NULL; managerRuntimeData.Izh_b=NULL; managerRuntimeData.Izh_c=NULL; managerRuntimeData.Izh_d=NULL;
	managerRuntimeData.Izh_C = NULL; managerRuntimeData.Izh_k = NULL; managerRuntimeData.Izh_vr = NULL; managerRuntimeData.Izh_vt = NULL; managerRuntimeData.Izh_vpeak = NULL;

	// per-neuron LIF model parameters
	if (managerRuntimeData.lif_tau_m!=NULL) delete[] managerRuntimeData.lif_tau_m;
	if (managerRuntimeData.lif_tau_ref!=NULL) delete[] managerRuntimeData.lif_tau_ref;
	if (managerRuntimeData.lif_tau_ref_c!=NULL) delete[] managerRuntimeData.lif_tau_ref_c;
	if (managerRuntimeData.lif_vTh!=NULL) delete[] managerRuntimeData.lif_vTh;
	if (managerRuntimeData.lif_vReset!=NULL) delete[] managerRuntimeData.lif_vReset;
	if (managerRuntimeData.lif_gain!=NULL) delete[] managerRuntimeData.lif_gain;
	if (managerRuntimeData.lif_bias!=NULL) delete[] managerRuntimeData.lif_bias;
	managerRuntimeData.lif_tau_m=NULL; managerRuntimeData.lif_tau_ref=NULL; managerRuntimeData.lif_vTh=NULL;
	managerRuntimeData.lif_vReset=NULL; managerRuntimeData.lif_gain=NULL; managerRuntimeData.lif_bias=NULL;
	managerRuntimeData.lif_tau_ref_c=NULL;

	// connectivity: synapse counts and cumulative offsets per neuron
	if (managerRuntimeData.Npre!=NULL) delete[] managerRuntimeData.Npre;
	if (managerRuntimeData.Npre_plastic!=NULL) delete[] managerRuntimeData.Npre_plastic;
	if (managerRuntimeData.Npost!=NULL) delete[] managerRuntimeData.Npost;
	managerRuntimeData.Npre=NULL; managerRuntimeData.Npre_plastic=NULL; managerRuntimeData.Npost=NULL;

	if (managerRuntimeData.cumulativePre!=NULL) delete[] managerRuntimeData.cumulativePre;
	if (managerRuntimeData.cumulativePost!=NULL) delete[] managerRuntimeData.cumulativePost;
	managerRuntimeData.cumulativePre=NULL; managerRuntimeData.cumulativePost=NULL;

	// conductance buffers (only allocated in conductance mode, but guarded anyway)
	if (managerRuntimeData.gAMPA!=NULL) delete[] managerRuntimeData.gAMPA;
	if (managerRuntimeData.gNMDA!=NULL) delete[] managerRuntimeData.gNMDA;
	if (managerRuntimeData.gNMDA_r!=NULL) delete[] managerRuntimeData.gNMDA_r;
	if (managerRuntimeData.gNMDA_d!=NULL) delete[] managerRuntimeData.gNMDA_d;
	if (managerRuntimeData.gGABAa!=NULL) delete[] managerRuntimeData.gGABAa;
	if (managerRuntimeData.gGABAb!=NULL) delete[] managerRuntimeData.gGABAb;
	if (managerRuntimeData.gGABAb_r!=NULL) delete[] managerRuntimeData.gGABAb_r;
	if (managerRuntimeData.gGABAb_d!=NULL) delete[] managerRuntimeData.gGABAb_d;
	managerRuntimeData.gAMPA=NULL; managerRuntimeData.gNMDA=NULL; managerRuntimeData.gNMDA_r=NULL; managerRuntimeData.gNMDA_d=NULL;
	managerRuntimeData.gGABAa=NULL; managerRuntimeData.gGABAb=NULL; managerRuntimeData.gGABAb_r=NULL; managerRuntimeData.gGABAb_d=NULL;

	// short-term plasticity state
	if (managerRuntimeData.stpu!=NULL) delete[] managerRuntimeData.stpu;
	if (managerRuntimeData.stpx!=NULL) delete[] managerRuntimeData.stpx;
	managerRuntimeData.stpu=NULL; managerRuntimeData.stpx=NULL;

	// homeostasis state
	if (managerRuntimeData.avgFiring!=NULL) delete[] managerRuntimeData.avgFiring;
	if (managerRuntimeData.baseFiring!=NULL) delete[] managerRuntimeData.baseFiring;
	managerRuntimeData.avgFiring=NULL; managerRuntimeData.baseFiring=NULL;

	// spike timing and spike counters
	if (managerRuntimeData.lastSpikeTime!=NULL) delete[] managerRuntimeData.lastSpikeTime;
	if (managerRuntimeData.synSpikeTime !=NULL) delete[] managerRuntimeData.synSpikeTime;
	if (managerRuntimeData.nSpikeCnt!=NULL) delete[] managerRuntimeData.nSpikeCnt;
	managerRuntimeData.lastSpikeTime=NULL; managerRuntimeData.synSpikeTime=NULL; managerRuntimeData.nSpikeCnt=NULL;

	// synapse topology (delays, pre/post id tables)
	if (managerRuntimeData.postDelayInfo!=NULL) delete[] managerRuntimeData.postDelayInfo;
	if (managerRuntimeData.preSynapticIds!=NULL) delete[] managerRuntimeData.preSynapticIds;
	if (managerRuntimeData.postSynapticIds!=NULL) delete[] managerRuntimeData.postSynapticIds;
	managerRuntimeData.postDelayInfo=NULL; managerRuntimeData.preSynapticIds=NULL; managerRuntimeData.postSynapticIds=NULL;

	// synaptic weights and their accumulated changes
	if (managerRuntimeData.wt!=NULL) delete[] managerRuntimeData.wt;
	if (managerRuntimeData.maxSynWt!=NULL) delete[] managerRuntimeData.maxSynWt;
	if (managerRuntimeData.wtChange !=NULL) delete[] managerRuntimeData.wtChange;
	managerRuntimeData.wt=NULL; managerRuntimeData.maxSynWt=NULL; managerRuntimeData.wtChange=NULL;

	// per-connection scale factors and connection-id lookup
	if (mulSynFast!=NULL) delete[] mulSynFast;
	if (mulSynSlow!=NULL) delete[] mulSynSlow;
	if (managerRuntimeData.connIdsPreIdx!=NULL) delete[] managerRuntimeData.connIdsPreIdx;
	mulSynFast=NULL; mulSynSlow=NULL; managerRuntimeData.connIdsPreIdx=NULL;

	// neuron-to-group lookup
	if (managerRuntimeData.grpIds!=NULL) delete[] managerRuntimeData.grpIds;
	managerRuntimeData.grpIds=NULL;

	// time tables and firing tables for both delay classes
	if (managerRuntimeData.timeTableD2 != NULL) delete [] managerRuntimeData.timeTableD2;
	if (managerRuntimeData.timeTableD1 != NULL) delete [] managerRuntimeData.timeTableD1;
	managerRuntimeData.timeTableD2 = NULL; managerRuntimeData.timeTableD1 = NULL;

	if (managerRuntimeData.firingTableD2!=NULL) delete[] managerRuntimeData.firingTableD2;
	if (managerRuntimeData.firingTableD1!=NULL) delete[] managerRuntimeData.firingTableD1;
	//if (managerRuntimeData.firingTableD2!=NULL) CUDA_CHECK_ERRORS(cudaFreeHost(managerRuntimeData.firingTableD2));
	//if (managerRuntimeData.firingTableD1!=NULL) CUDA_CHECK_ERRORS(cudaFreeHost(managerRuntimeData.firingTableD1));
	managerRuntimeData.firingTableD2 = NULL; managerRuntimeData.firingTableD1 = NULL;

	// external-network firing tables and their per-group end indices
	if (managerRuntimeData.extFiringTableD2!=NULL) delete[] managerRuntimeData.extFiringTableD2;
	if (managerRuntimeData.extFiringTableD1!=NULL) delete[] managerRuntimeData.extFiringTableD1;
	//if (managerRuntimeData.extFiringTableD2!=NULL) CUDA_CHECK_ERRORS(cudaFreeHost(managerRuntimeData.extFiringTableD2));
	//if (managerRuntimeData.extFiringTableD1!=NULL) CUDA_CHECK_ERRORS(cudaFreeHost(managerRuntimeData.extFiringTableD1));
	managerRuntimeData.extFiringTableD2 = NULL; managerRuntimeData.extFiringTableD1 = NULL;

	if (managerRuntimeData.extFiringTableEndIdxD1 != NULL) delete[] managerRuntimeData.extFiringTableEndIdxD1;
	if (managerRuntimeData.extFiringTableEndIdxD2 != NULL) delete[] managerRuntimeData.extFiringTableEndIdxD2;
	//if (managerRuntimeData.extFiringTableEndIdxD1 != NULL) CUDA_CHECK_ERRORS(cudaFreeHost(managerRuntimeData.extFiringTableEndIdxD1));
	//if (managerRuntimeData.extFiringTableEndIdxD2 != NULL) CUDA_CHECK_ERRORS(cudaFreeHost(managerRuntimeData.extFiringTableEndIdxD2));
	managerRuntimeData.extFiringTableEndIdxD1 = NULL; managerRuntimeData.extFiringTableEndIdxD2 = NULL;
}
5996 
6000 void SNN::resetPoissonNeuron(int netId, int lGrpId, int lNId) {
6001  assert(lNId < networkConfigs[netId].numN);
6002  managerRuntimeData.lastSpikeTime[lNId] = MAX_SIMULATION_TIME;
6003  if (groupConfigs[netId][lGrpId].WithHomeostasis)
6004  managerRuntimeData.avgFiring[lNId] = 0.0f;
6005 
6006  if (groupConfigs[netId][lGrpId].WithSTP) {
6007  for (int j = 0; j < networkConfigs[netId].maxDelay + 1; j++) { // is of size maxDelay_+1
6008  int index = STP_BUF_POS(lNId, j, networkConfigs[netId].maxDelay);
6009  managerRuntimeData.stpu[index] = 0.0f;
6010  managerRuntimeData.stpx[index] = 1.0f;
6011  }
6012  }
6013 }
6014 
// Resets the spike propagation buffer, presumably discarding any spikes still
// queued for future delivery — confirm against SpikeBuffer::reset().
void SNN::resetPropogationBuffer() {
	// FIXME: why 1023?
	spikeBuf->reset(0, 1023);
}
6019 
6020 //Reset wt, wtChange, pre-firing time values to default values, rewritten to
6021 //integrate changes between JMN and MDR -- KDC
6022 //if changeWeights is false, we should keep the values of the weights as they currently
6023 //are but we should be able to change them to plastic or fixed synapses. -- KDC
6024 // FIXME: imlement option of resetting weights
6025 void SNN::resetSynapse(int netId, bool changeWeights) {
6026  memset(managerRuntimeData.wtChange, 0, sizeof(float) * networkConfigs[netId].numPreSynNet); // reset the synaptic derivatives
6027 
6028  for (int syn = 0; syn < networkConfigs[netId].numPreSynNet; syn++)
6029  managerRuntimeData.synSpikeTime[syn] = MAX_SIMULATION_TIME; // reset the spike time of each syanpse
6030 }
6031 
6032 void SNN::resetTimeTable() {
6033  memset(managerRuntimeData.timeTableD2, 0, sizeof(int) * (1000 + glbNetworkConfig.maxDelay + 1));
6034  memset(managerRuntimeData.timeTableD1, 0, sizeof(int) * (1000 + glbNetworkConfig.maxDelay + 1));
6035 }
6036 
6037 void SNN::resetFiringTable() {
6038  memset(managerRuntimeData.firingTableD2, 0, sizeof(int) * managerRTDSize.maxMaxSpikeD2);
6039  memset(managerRuntimeData.firingTableD1, 0, sizeof(int) * managerRTDSize.maxMaxSpikeD1);
6040  memset(managerRuntimeData.extFiringTableEndIdxD2, 0, sizeof(int) * managerRTDSize.maxNumGroups);
6041  memset(managerRuntimeData.extFiringTableEndIdxD1, 0, sizeof(int) * managerRTDSize.maxNumGroups);
6042  memset(managerRuntimeData.extFiringTableD2, 0, sizeof(int*) * managerRTDSize.maxNumGroups);
6043  memset(managerRuntimeData.extFiringTableD1, 0, sizeof(int*) * managerRTDSize.maxNumGroups);
6044 }
6045 
// Resets spike counters for one global group (or all groups when
// gGrpId == ALL). For ALL, every active partition is reset: GPU partitions
// via resetSpikeCnt_GPU, CPU partitions either directly (Windows/macOS) or
// on pinned pthreads (Linux), which are all joined before returning.
void SNN::resetSpikeCnt(int gGrpId) {
	assert(gGrpId >= ALL);

	if (gGrpId == ALL) {
	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
		// NOTE(review): variable-length arrays are a compiler extension in C++ — confirm acceptable
		pthread_t threads[numCores + 1]; // 1 additional array size if numCores == 0, it may work though bad practice
		cpu_set_t cpus;
		ThreadStruct argsThreadRoutine[numCores + 1]; // same as above, +1 array size
		int threadCount = 0;
	#endif

		for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
			if (!groupPartitionLists[netId].empty()) {
				if (netId < CPU_RUNTIME_BASE) // GPU runtime
					resetSpikeCnt_GPU(netId, ALL);
				else{ // CPU runtime
	#if defined(WIN32) || defined(WIN64) || defined(__APPLE__)
					resetSpikeCnt_CPU(netId, ALL);
	#else // Linux or MAC
					// pin each worker thread to a core in round-robin fashion
					pthread_attr_t attr;
					pthread_attr_init(&attr);
					CPU_ZERO(&cpus);
					CPU_SET(threadCount%NUM_CPU_CORES, &cpus);
					pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);

					// argument struct must outlive the thread; it lives until the joins below
					argsThreadRoutine[threadCount].snn_pointer = this;
					argsThreadRoutine[threadCount].netId = netId;
					argsThreadRoutine[threadCount].lGrpId = ALL;
					argsThreadRoutine[threadCount].startIdx = 0;
					argsThreadRoutine[threadCount].endIdx = 0;
					argsThreadRoutine[threadCount].GtoLOffset = 0;

					pthread_create(&threads[threadCount], &attr, &SNN::helperResetSpikeCnt_CPU, (void*)&argsThreadRoutine[threadCount]);
					pthread_attr_destroy(&attr);
					threadCount++;
	#endif
				}
			}
		}

	#if !defined(WIN32) && !defined(WIN64) && !defined(__APPLE__) // Linux or MAC
		// join all the threads
		for (int i=0; i<threadCount; i++){
			pthread_join(threads[i], NULL);
		}
	#endif
	}
	else {
		// single group: dispatch to the partition that owns it
		int netId = groupConfigMDMap[gGrpId].netId;
		int lGrpId = groupConfigMDMap[gGrpId].lGrpId;

		if (netId < CPU_RUNTIME_BASE) // GPU runtime
			resetSpikeCnt_GPU(netId, lGrpId);
		else // CPU runtime
			resetSpikeCnt_CPU(netId, lGrpId);
	}
}
6103 
6104 
6106 inline SynInfo SNN::SET_CONN_ID(int nId, int sId, int grpId) {
6107  if (grpId > GROUP_ID_MASK) {
6108  KERNEL_ERROR("Error: Group Id (%d) exceeds maximum limit (%d)", grpId, GROUP_ID_MASK);
6110  }
6111 
6112  SynInfo synInfo;
6113  //p.postId = (((sid)<<CONN_SYN_NEURON_BITS)+((nid)&CONN_SYN_NEURON_MASK));
6114  //p.grpId = grpId;
6115  synInfo.gsId = ((grpId << NUM_SYNAPSE_BITS) | sId);
6116  synInfo.nId = nId;
6117 
6118  return synInfo;
6119 }
6120 
6121 
6122 void SNN::setGrpTimeSlice(int gGrpId, int timeSlice) {
6123  if (gGrpId == ALL) {
6124  for(int grpId = 0; grpId < numGroups; grpId++) {
6125  if (groupConfigMap[grpId].isSpikeGenerator)
6126  setGrpTimeSlice(grpId, timeSlice);
6127  }
6128  } else {
6129  assert((timeSlice > 0 ) && (timeSlice <= MAX_TIME_SLICE));
6130  // the group should be poisson spike generator group
6131  groupConfigMDMap[gGrpId].currTimeSlice = timeSlice;
6132  }
6133 }
6134 
6135 // method to set const member randSeed_
6136 int SNN::setRandSeed(int seed) {
6137  if (seed<0)
6138  return time(NULL);
6139  else if(seed==0)
6140  return 123;
6141  else
6142  return seed;
6143 }
6144 
6145 void SNN::fillSpikeGenBits(int netId) {
6146  SpikeBuffer::SpikeIterator spikeBufIter;
6147  SpikeBuffer::SpikeIterator spikeBufIterEnd = spikeBuf->back();
6148 
6149  // Covert spikes stored in spikeBuffer to SpikeGenBit
6150  for (spikeBufIter = spikeBuf->front(); spikeBufIter != spikeBufIterEnd; ++spikeBufIter) {
6151  // get the global neuron id and group id for this particular spike
6152  int gGrpId = spikeBufIter->grpId;
6153 
6154  if (groupConfigMDMap[gGrpId].netId == netId) {
6155  int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
6156  int lNId = spikeBufIter->neurId /* gNId */ + groupConfigMDMap[gGrpId].GtoLOffset;
6157 
6158  // add spike to spikeGentBit
6159  assert(groupConfigMap[gGrpId].isSpikeGenerator == true);
6160 
6161  int nIdPos = (lNId - groupConfigs[netId][lGrpId].lStartN + groupConfigs[netId][lGrpId].Noffset);
6162  int nIdBitPos = nIdPos % 32;
6163  int nIdIndex = nIdPos / 32;
6164 
6165  assert(nIdIndex < (networkConfigs[netId].numNSpikeGen / 32 + 1));
6166 
6167  managerRuntimeData.spikeGenBits[nIdIndex] |= (1 << nIdBitPos);
6168  }
6169  }
6170 }
6171 
// Marks the start of a timed interval (paired with stopTiming()).
void SNN::startTiming() { prevExecutionTime = cumExecutionTime; }
// Accumulates the time elapsed since the last startTiming()/stopTiming()
// into executionTime, and re-arms the baseline for the next interval.
void SNN::stopTiming() {
	executionTime += (cumExecutionTime - prevExecutionTime);
	prevExecutionTime = cumExecutionTime;
}
6177 
6178 // enters testing phase
6179 // in testing, no weight changes can be made, allowing you to evaluate learned weights, etc.
6180 void SNN::startTesting(bool shallUpdateWeights) {
6181  // because this can be called at any point in time, if we're off the 1-second grid, we want to make
6182  // sure to apply the accumulated weight changes to the weight matrix
6183  // but we don't reset the wt update interval counter
6184  if (shallUpdateWeights && !sim_in_testing) {
6185  // careful: need to temporarily adjust stdpScaleFactor to make this right
6186  if (wtANDwtChangeUpdateIntervalCnt_) {
6187  float storeScaleSTDP = stdpScaleFactor_;
6188  stdpScaleFactor_ = 1.0f/wtANDwtChangeUpdateIntervalCnt_;
6189 
6190  updateWeights();
6191 
6192  stdpScaleFactor_ = storeScaleSTDP;
6193  }
6194  }
6195 
6196  sim_in_testing = true;
6197 
6198  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
6199  if (!groupPartitionLists[netId].empty()) {
6200  networkConfigs[netId].sim_in_testing = true;
6201  updateNetworkConfig(netId); // update networkConfigRT struct (|TODO copy only a single boolean)
6202  }
6203  }
6204 }
6205 
6206 // exits testing phase
6208  sim_in_testing = false;
6209 
6210  for (int netId = 0; netId < MAX_NET_PER_SNN; netId++) {
6211  if (!groupPartitionLists[netId].empty()) {
6212  networkConfigs[netId].sim_in_testing = false;
6213  updateNetworkConfig(netId); // update networkConfigRT struct (|TODO copy only a single boolean)
6214  }
6215  }
6216 }
6217 
6218 void SNN::updateConnectionMonitor(short int connId) {
6219  for (int monId=0; monId<numConnectionMonitor; monId++) {
6220  if (connId==ALL || connMonCoreList[monId]->getConnectId()==connId) {
6221  int timeInterval = connMonCoreList[monId]->getUpdateTimeIntervalSec();
6222  if (timeInterval==1 || timeInterval>1 && (getSimTime()%timeInterval)==0) {
6223  // this ConnectionMonitor wants periodic recording
6224  connMonCoreList[monId]->writeConnectFileSnapshot(simTime,
6225  getWeightMatrix2D(connMonCoreList[monId]->getConnectId()));
6226  }
6227  }
6228  }
6229 }
6230 
6231 // FIXME: modify this for multi-GPUs
6232 std::vector< std::vector<float> > SNN::getWeightMatrix2D(short int connId) {
6233  assert(connId > ALL); // ALL == -1
6234  std::vector< std::vector<float> > wtConnId;
6235 
6236  int grpIdPre = connectConfigMap[connId].grpSrc;
6237  int grpIdPost = connectConfigMap[connId].grpDest;
6238 
6239  int netIdPost = groupConfigMDMap[grpIdPost].netId;
6240  int lGrpIdPost = groupConfigMDMap[grpIdPost].lGrpId;
6241 
6242  // init weight matrix with right dimensions
6243  for (int i = 0; i < groupConfigMap[grpIdPre].numN; i++) {
6244  std::vector<float> wtSlice;
6245  for (int j = 0; j < groupConfigMap[grpIdPost].numN; j++) {
6246  wtSlice.push_back(NAN);
6247  }
6248  wtConnId.push_back(wtSlice);
6249  }
6250 
6251  // copy the weights for a given post-group from device
6252  // \TODO: check if the weights for this grpIdPost have already been copied
6253  // \TODO: even better, but tricky because of ordering, make copyWeightState connection-based
6254 
6255  assert(grpIdPost > ALL); // ALL == -1
6256 
6257  // Note, copyWeightState() also copies pre-connections information (e.g., Npre, Npre_plastic, cumulativePre, and preSynapticIds)
6258  fetchWeightState(netIdPost, lGrpIdPost);
6259  fetchConnIdsLookupArray(netIdPost);
6260 
6261  for (int lNIdPost = groupConfigs[netIdPost][lGrpIdPost].lStartN; lNIdPost <= groupConfigs[netIdPost][lGrpIdPost].lEndN; lNIdPost++) {
6262  unsigned int pos_ij = managerRuntimeData.cumulativePre[lNIdPost];
6263  for (int i = 0; i < managerRuntimeData.Npre[lNIdPost]; i++, pos_ij++) {
6264  // skip synapses that belong to a different connection ID
6265  if (managerRuntimeData.connIdsPreIdx[pos_ij] != connId) //connInfo->connId)
6266  continue;
6267 
6268  // find pre-neuron ID and update ConnectionMonitor container
6269  int lNIdPre = GET_CONN_NEURON_ID(managerRuntimeData.preSynapticIds[pos_ij]);
6270  int lGrpIdPre = GET_CONN_GRP_ID(managerRuntimeData.preSynapticIds[pos_ij]);
6271  wtConnId[lNIdPre - groupConfigs[netIdPost][lGrpIdPre].lStartN][lNIdPost - groupConfigs[netIdPost][lGrpIdPost].lStartN] =
6272  fabs(managerRuntimeData.wt[pos_ij]);
6273  }
6274  }
6275 
6276  return wtConnId;
6277 }
6278 
/*!
 * \brief Copies the group-state (neuromodulator) buffer back to the manager and forwards it
 * to the group's GroupMonitor.
 *
 * Must be called at least once per simulated second, because the underlying buffer only
 * holds 1000 ms of data (grpDABuffer is indexed lGrpId * 1000 + t). The update window is
 * [lastUpdate % 1000, getSimTimeMs()), so repeated calls within the same second only
 * process the new samples.
 *
 * \param gGrpId global group ID, or ALL to recurse over every group (groups without a
 *               monitor bail out early via monitorId < 0).
 */
void SNN::updateGroupMonitor(int gGrpId) {
	// don't continue if no group monitors in the network
	if (!numGroupMonitor)
		return;

	if (gGrpId == ALL) {
		// recurse into each group; the per-group branch below filters unmonitored ones
		for (int gGrpId = 0; gGrpId < numGroups; gGrpId++)
			updateGroupMonitor(gGrpId);
	} else {
		int netId = groupConfigMDMap[gGrpId].netId;
		int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
		// update group monitor of a specific group
		// find index in group monitor arrays
		int monitorId = groupConfigMDMap[gGrpId].groupMonitorId;

		// don't continue if no group monitor enabled for this group
		if (monitorId < 0) return;

		// find last update time for this group
		// NOTE(review): lastUpdate is int here but long int in updateSpikeMonitor /
		// updateNeuronMonitor — confirm getSimTime() always fits in int on this path
		GroupMonitorCore* grpMonObj = groupMonCoreList[monitorId];
		int lastUpdate = grpMonObj->getLastUpdated();

		// don't continue if time interval is zero (nothing to update)
		if (getSimTime() - lastUpdate <= 0)
			return;

		// the 1000-ms ring buffer has already wrapped if more than a second elapsed
		if (getSimTime() - lastUpdate > 1000)
			KERNEL_ERROR("updateGroupMonitor(grpId=%d) must be called at least once every second", gGrpId);

		// copy the group status (neuromodulators) to the manager runtime
		fetchGroupState(netId, lGrpId);

		// find the time interval in which to update group status
		// usually, we call updateGroupMonitor once every second, so the time interval is [0,1000)
		// however, updateGroupMonitor can be called at any time t \in [0,1000)... so we can have the cases
		// [0,t), [t,1000), and even [t1, t2)
		int numMsMin = lastUpdate % 1000; // lower bound is given by last time we called update
		int numMsMax = getSimTimeMs(); // upper bound is given by current time
		if (numMsMax == 0)
			numMsMax = 1000; // special case: full second
		assert(numMsMin < numMsMax);

		// current time is last completed second in milliseconds (plus t to be added below)
		// special case is after each completed second where !getSimTimeMs(): here we look 1s back
		int currentTimeSec = getSimTimeSec();
		if (!getSimTimeMs())
			currentTimeSec--;

		// save current time as last update time
		grpMonObj->setLastUpdated(getSimTime());

		// prepare fast access
		FILE* grpFileId = groupMonCoreList[monitorId]->getGroupFileId();
		bool writeGroupToFile = grpFileId != NULL;
		bool writeGroupToArray = grpMonObj->isRecording();
		float data;

		// Read one piece of data at a time from the buffer and put the data into the appropriate
		// monitor buffer. Later the user may need to dump these group status data to an output file.
		for(int t = numMsMin; t < numMsMax; t++) {
			// fetch group status data; only dopamine concentration is supported currently
			data = managerRuntimeData.grpDABuffer[lGrpId * 1000 + t];

			// current time is last completed second plus whatever is leftover in t
			int time = currentTimeSec * 1000 + t;

			if (writeGroupToFile) {
				// TODO: write to group status file
			}

			if (writeGroupToArray) {
				grpMonObj->pushData(time, data);
			}
		}

		if (grpFileId!=NULL) // flush group status file
			fflush(grpFileId);
	}
}
6358 
6359 // FIXME: wrong to use groupConfigs[0]
6360 void SNN::userDefinedSpikeGenerator(int gGrpId) {
6361  // \FIXME this function is a mess
6362  SpikeGeneratorCore* spikeGenFunc = groupConfigMap[gGrpId].spikeGenFunc;
6363  int netId = groupConfigMDMap[gGrpId].netId;
6364  int timeSlice = groupConfigMDMap[gGrpId].currTimeSlice;
6365  int currTime = simTime;
6366  bool done;
6367 
6368  fetchLastSpikeTime(netId);
6369 
6370  for(int gNId = groupConfigMDMap[gGrpId].gStartN; gNId <= groupConfigMDMap[gGrpId].gEndN; gNId++) {
6371  // start the time from the last time it spiked, that way we can ensure that the refractory period is maintained
6372  int lNId = gNId + groupConfigMDMap[gGrpId].GtoLOffset;
6373  int nextTime = managerRuntimeData.lastSpikeTime[lNId];
6374  if (nextTime == MAX_SIMULATION_TIME)
6375  nextTime = 0;
6376 
6377  // the end of the valid time window is either the length of the scheduling time slice from now (because that
6378  // is the max of the allowed propagated buffer size) or simply the end of the simulation
6379  int endOfTimeWindow = std::min(currTime+timeSlice, simTimeRunStop);
6380 
6381  done = false;
6382  while (!done) {
6383  // generate the next spike time (nextSchedTime) from the nextSpikeTime callback
6384  int nextSchedTime = spikeGenFunc->nextSpikeTime(this, gGrpId, gNId - groupConfigMDMap[gGrpId].gStartN, currTime, nextTime, endOfTimeWindow);
6385 
6386  // the generated spike time is valid only if:
6387  // - it has not been scheduled before (nextSchedTime > nextTime)
6388  // - but careful: we would drop spikes at t=0, because we cannot initialize nextTime to -1...
6389  // - it is within the scheduling time slice (nextSchedTime < endOfTimeWindow)
6390  // - it is not in the past (nextSchedTime >= currTime)
6391  if ((nextSchedTime==0 || nextSchedTime>nextTime) && nextSchedTime<endOfTimeWindow && nextSchedTime>=currTime) {
6392 // fprintf(stderr,"%u: spike scheduled for %d at %u\n",currTime, i-groupConfigs[0][grpId].StartN,nextSchedTime);
6393  // scheduled spike...
6394  // \TODO CPU mode does not check whether the same AER event has been scheduled before (bug #212)
6395  // check how GPU mode does it, then do the same here.
6396  nextTime = nextSchedTime;
6397  spikeBuf->schedule(gNId, gGrpId, nextTime - currTime);
6398  } else {
6399  done = true;
6400  }
6401  }
6402  }
6403 }
6404 
6405 void SNN::generateUserDefinedSpikes() {
6406  for(int gGrpId = 0; gGrpId < numGroups; gGrpId++) {
6407  if (groupConfigMap[gGrpId].isSpikeGenerator) {
6408  // This evaluation is done to check if its time to get new set of spikes..
6409  // check whether simTime has advance more than the current time slice, in which case we need to schedule
6410  // spikes for the next time slice
6411  // we always have to run this the first millisecond of a new runNetwork call; that is,
6412  // when simTime==simTimeRunStart
6413  if(((simTime - groupConfigMDMap[gGrpId].sliceUpdateTime) >= groupConfigMDMap[gGrpId].currTimeSlice || simTime == simTimeRunStart)) {
6414  int timeSlice = groupConfigMDMap[gGrpId].currTimeSlice;
6415  groupConfigMDMap[gGrpId].sliceUpdateTime = simTime;
6416 
6417  // we dont generate any poisson spike if during the
6418  // current call we might exceed the maximum 32 bit integer value
6419  if ((simTime + timeSlice) == MAX_SIMULATION_TIME || (simTime + timeSlice) < 0)
6420  return;
6421 
6422  if (groupConfigMap[gGrpId].spikeGenFunc != NULL) {
6423  userDefinedSpikeGenerator(gGrpId);
6424  }
6425  }
6426  }
6427  }
6428 }
6429 
6435 void SNN::allocateManagerSpikeTables() {
6436  managerRuntimeData.firingTableD2 = new int[managerRTDSize.maxMaxSpikeD2];
6437  managerRuntimeData.firingTableD1 = new int[managerRTDSize.maxMaxSpikeD1];
6438  managerRuntimeData.extFiringTableEndIdxD2 = new int[managerRTDSize.maxNumGroups];
6439  managerRuntimeData.extFiringTableEndIdxD1 = new int[managerRTDSize.maxNumGroups];
6440  managerRuntimeData.extFiringTableD2 = new int*[managerRTDSize.maxNumGroups];
6441  managerRuntimeData.extFiringTableD1 = new int*[managerRTDSize.maxNumGroups];
6442 
6443  //CUDA_CHECK_ERRORS(cudaMallocHost(&managerRuntimeData.firingTableD2, sizeof(int) * managerRTDSize.maxMaxSpikeD2));
6444  //CUDA_CHECK_ERRORS(cudaMallocHost(&managerRuntimeData.firingTableD1, sizeof(int) * managerRTDSize.maxMaxSpikeD1));
6445  //CUDA_CHECK_ERRORS(cudaMallocHost(&managerRuntimeData.extFiringTableEndIdxD2, sizeof(int) * managerRTDSize.maxNumGroups));
6446  //CUDA_CHECK_ERRORS(cudaMallocHost(&managerRuntimeData.extFiringTableEndIdxD1, sizeof(int) * managerRTDSize.maxNumGroups));
6447  //CUDA_CHECK_ERRORS(cudaMallocHost(&managerRuntimeData.extFiringTableD2, sizeof(int*) * managerRTDSize.maxNumGroups));
6448  //CUDA_CHECK_ERRORS(cudaMallocHost(&managerRuntimeData.extFiringTableD1, sizeof(int*) * managerRTDSize.maxNumGroups));
6449  resetFiringTable();
6450 
6451  managerRuntimeData.timeTableD2 = new unsigned int[TIMING_COUNT];
6452  managerRuntimeData.timeTableD1 = new unsigned int[TIMING_COUNT];
6453  resetTimeTable();
6454 }
6455 
6456 // updates simTime, returns true when new second started
6457 bool SNN::updateTime() {
6458  bool finishedOneSec = false;
6459 
6460  // done one second worth of simulation
6461  // update relevant parameters...now
6462  if(++simTimeMs == 1000) {
6463  simTimeMs = 0;
6464  simTimeSec++;
6465  finishedOneSec = true;
6466  }
6467 
6468  simTime++;
6469  if(simTime == MAX_SIMULATION_TIME || simTime < 0){
6470  // reached the maximum limit of the simulation time using 32 bit value...
6471  KERNEL_WARN("Maximum Simulation Time Reached...Resetting simulation time");
6472  }
6473 
6474  return finishedOneSec;
6475 }
6476 
// FIXME: modify this for multi-GPUs
/*!
 * \brief Copies the firing tables back to the manager and forwards new spikes to the
 * group's SpikeMonitor (AER array and/or binary spike file).
 *
 * Must be called at least once per simulated second: the firing/time tables only cover
 * 1000 ms, and the update window is [lastUpdate % 1000, getSimTimeMs()). Each spike is
 * written to the file as two ints (time in ms, then the group-local 0-based neuron ID).
 *
 * \param gGrpId global group ID, or ALL to recurse over every group (groups without a
 *               monitor bail out early via monitorId < 0).
 */
void SNN::updateSpikeMonitor(int gGrpId) {
	// don't continue if no spike monitors in the network
	if (!numSpikeMonitor)
		return;

	if (gGrpId == ALL) {
		// recurse into each group; the per-group branch below filters unmonitored ones
		for (int gGrpId = 0; gGrpId < numGroups; gGrpId++)
			updateSpikeMonitor(gGrpId);
	} else {
		int netId = groupConfigMDMap[gGrpId].netId;
		int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
		// update spike monitor of a specific group
		// find index in spike monitor arrays
		int monitorId = groupConfigMDMap[gGrpId].spikeMonitorId;

		// don't continue if no spike monitor enabled for this group
		if (monitorId < 0) return;

		// find last update time for this group
		SpikeMonitorCore* spkMonObj = spikeMonCoreList[monitorId];
		long int lastUpdate = spkMonObj->getLastUpdated();

		// don't continue if time interval is zero (nothing to update)
		if ( ((long int)getSimTime()) - lastUpdate <= 0)
			return;

		// the 1000-ms tables have already wrapped if more than a second elapsed
		if ( ((long int)getSimTime()) - lastUpdate > 1000)
			KERNEL_ERROR("updateSpikeMonitor(grpId=%d) must be called at least once every second",gGrpId);

		// AER buffer max size warning here.
		// Because of C++ short-circuit evaluation, the last condition should not be evaluated
		// if the previous conditions are false.
		if (spkMonObj->getAccumTime() > LONG_SPIKE_MON_DURATION \
			&& this->getGroupNumNeurons(gGrpId) > LARGE_SPIKE_MON_GRP_SIZE \
			&& spkMonObj->isBufferBig()){
			// change this warning message to correct message
			KERNEL_WARN("updateSpikeMonitor(grpId=%d) is becoming very large. (>%lu MB)",gGrpId,(long int) MAX_SPIKE_MON_BUFFER_SIZE/1024 );// make this better
			KERNEL_WARN("Reduce the cumulative recording time (currently %lu minutes) or the group size (currently %d) to avoid this.",spkMonObj->getAccumTime()/(1000*60),this->getGroupNumNeurons(gGrpId));
		}

		// copy the neuron firing information to the manager runtime
		fetchSpikeTables(netId);
		fetchGrpIdsLookupArray(netId);

		// find the time interval in which to update spikes
		// usually, we call updateSpikeMonitor once every second, so the time interval is [0,1000)
		// however, updateSpikeMonitor can be called at any time t \in [0,1000)... so we can have the cases
		// [0,t), [t,1000), and even [t1, t2)
		int numMsMin = lastUpdate % 1000; // lower bound is given by last time we called update
		int numMsMax = getSimTimeMs(); // upper bound is given by current time
		if (numMsMax == 0)
			numMsMax = 1000; // special case: full second
		assert(numMsMin < numMsMax);

		// current time is last completed second in milliseconds (plus t to be added below)
		// special case is after each completed second where !getSimTimeMs(): here we look 1s back
		int currentTimeSec = getSimTimeSec();
		if (!getSimTimeMs())
			currentTimeSec--;

		// save current time as last update time
		spkMonObj->setLastUpdated( (long int)getSimTime() );

		// prepare fast access
		FILE* spkFileId = spikeMonCoreList[monitorId]->getSpikeFileId();
		bool writeSpikesToFile = spkFileId != NULL;
		bool writeSpikesToArray = spkMonObj->getMode()==AER && spkMonObj->isRecording();

		// Read one spike at a time from the buffer and put the spikes into the appropriate
		// monitor buffer. Later the user may need to dump these spikes to an output file.
		// k == 0 walks the 2+ms-delay tables, k == 1 the 1ms-delay tables.
		for (int k = 0; k < 2; k++) {
			unsigned int* timeTablePtr = (k == 0) ? managerRuntimeData.timeTableD2 : managerRuntimeData.timeTableD1;
			int* fireTablePtr = (k == 0) ? managerRuntimeData.firingTableD2 : managerRuntimeData.firingTableD1;
			for(int t = numMsMin; t < numMsMax; t++) {
				// timeTable holds cumulative spike counts per ms, offset by maxDelay
				for(int i = timeTablePtr[t + glbNetworkConfig.maxDelay]; i < timeTablePtr[t + glbNetworkConfig.maxDelay + 1]; i++) {
					// retrieve the neuron id
					int lNId = fireTablePtr[i];

					// make sure neuron belongs to currently relevant group
					int this_grpId = managerRuntimeData.grpIds[lNId];
					if (this_grpId != lGrpId)
						continue;

					// adjust nid to be 0-indexed for each group
					// this way, if a group has 10 neurons, their IDs in the spike file and spike monitor will be
					// indexed from 0..9, no matter what their real nid is
					int nId = lNId - groupConfigs[netId][lGrpId].lStartN;
					assert(nId >= 0);

					// current time is last completed second plus whatever is leftover in t
					int time = currentTimeSec * 1000 + t;

					if (writeSpikesToFile) {
						// binary record: time (int, ms) followed by group-local neuron ID (int)
						int cnt;
						cnt = fwrite(&time, sizeof(int), 1, spkFileId); assert(cnt==1);
						cnt = fwrite(&nId, sizeof(int), 1, spkFileId); assert(cnt==1);
					}

					if (writeSpikesToArray) {
						spkMonObj->pushAER(time, nId);
					}
				}
			}
		}

		if (spkFileId!=NULL) // flush spike file
			fflush(spkFileId);
	}
}
6587 
// FIXME: modify this for multi-GPUs
/*!
 * \brief Copies the neuron-state buffers (v, u, I) back to the manager and forwards them
 * to the group's NeuronMonitor (array and/or binary file).
 *
 * Must be called at least once per simulated second: the state buffers only hold 1000 ms,
 * and the update window is [lastUpdate % 1000, getSimTimeMs()). Each record written to the
 * file is: time (int, ms), group-local neuron ID (int), then v, u, I (floats).
 *
 * \param gGrpId global group ID, or ALL to recurse over every group (groups without a
 *               monitor bail out early via monitorId < 0).
 */
void SNN::updateNeuronMonitor(int gGrpId) {
	// don't continue if no neuron monitors in the network
	if (!numNeuronMonitor)
		return;

	//printf("The global group id is: %i\n", gGrpId);

	if (gGrpId == ALL) {
		// recurse into each group; the per-group branch below filters unmonitored ones
		for (int gGrpId = 0; gGrpId < numGroups; gGrpId++)
			updateNeuronMonitor(gGrpId);
	}
	else {
		//printf("UpdateNeuronMonitor is being executed!\n");
		int netId = groupConfigMDMap[gGrpId].netId;
		int lGrpId = groupConfigMDMap[gGrpId].lGrpId;
		// update neuron monitor of a specific group
		// find index in neuron monitor arrays
		int monitorId = groupConfigMDMap[gGrpId].neuronMonitorId;

		// don't continue if no neuron monitor enabled for this group
		if (monitorId < 0) return;

		// find last update time for this group
		NeuronMonitorCore* nrnMonObj = neuronMonCoreList[monitorId];
		long int lastUpdate = nrnMonObj->getLastUpdated();

		// don't continue if time interval is zero (nothing to update)
		if (((long int)getSimTime()) - lastUpdate <= 0)
			return;

		// the 1000-ms buffers have already wrapped if more than a second elapsed
		if (((long int)getSimTime()) - lastUpdate > 1000)
			KERNEL_ERROR("updateNeuronMonitor(grpId=%d) must be called at least once every second", gGrpId);

		// AER buffer max size warning here.
		// Because of C++ short-circuit evaluation, the last condition should not be evaluated
		// if the previous conditions are false.

		/*if (nrnMonObj->getAccumTime() > LONG_NEURON_MON_DURATION \
			&& this->getGroupNumNeurons(gGrpId) > LARGE_NEURON_MON_GRP_SIZE \
			&& nrnMonObj->isBufferBig()) {
			// change this warning message to correct message
			KERNEL_WARN("updateNeuronMonitor(grpId=%d) is becoming very large. (>%lu MB)", gGrpId, (long int)MAX_NEURON_MON_BUFFER_SIZE / 1024);// make this better
			KERNEL_WARN("Reduce the cumulative recording time (currently %lu minutes) or the group size (currently %d) to avoid this.", nrnMonObj->getAccumTime() / (1000 * 60), this->getGroupNumNeurons(gGrpId));
		}*/

		// copy the neuron information to manager runtime
		fetchNeuronStateBuffer(netId, lGrpId);

		// find the time interval in which to update neuron state info
		// usually, we call updateNeuronMonitor once every second, so the time interval is [0,1000)
		// however, updateNeuronMonitor can be called at any time t \in [0,1000)... so we can have the cases
		// [0,t), [t,1000), and even [t1, t2)
		int numMsMin = lastUpdate % 1000; // lower bound is given by last time we called update
		int numMsMax = getSimTimeMs(); // upper bound is given by current time
		if (numMsMax == 0)
			numMsMax = 1000; // special case: full second
		assert(numMsMin < numMsMax);
		//KERNEL_INFO("lastUpdate: %d -- numMsMin: %d -- numMsMax: %d", lastUpdate, numMsMin, numMsMax);

		// current time is last completed second in milliseconds (plus t to be added below)
		// special case is after each completed second where !getSimTimeMs(): here we look 1s back
		int currentTimeSec = getSimTimeSec();
		if (!getSimTimeMs())
			currentTimeSec--;

		// save current time as last update time
		nrnMonObj->setLastUpdated((long int)getSimTime());

		// prepare fast access
		FILE* nrnFileId = neuronMonCoreList[monitorId]->getNeuronFileId();
		bool writeNeuronStateToFile = nrnFileId != NULL;
		bool writeNeuronStateToArray = nrnMonObj->isRecording();

		// Read one neuron state value at a time from the buffer and put the neuron state values
		// into the appropriate monitor buffer. Later the user may need to dump these neuron
		// state values to an output file.
		//printf("The numMsMin is: %i; and numMsMax is: %i\n", numMsMin, numMsMax);
		for (int t = numMsMin; t < numMsMax; t++) {
			//printf("The lStartN is: %i; and lEndN is: %i\n", groupConfigs[netId][lGrpId].lStartN, groupConfigs[netId][lGrpId].lEndN);
			for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++) {
				float v, u, I;

				// make sure neuron belongs to currently relevant group
				int this_grpId = managerRuntimeData.grpIds[lNId];
				if (this_grpId != lGrpId)
					continue;

				// adjust nid to be 0-indexed for each group
				// this way, if a group has 10 neurons, their IDs in the spike file and spike monitor will be
				// indexed from 0..9, no matter what their real nid is
				int nId = lNId - groupConfigs[netId][lGrpId].lStartN;
				assert(nId >= 0);

				// buffer layout: per millisecond t, one stride of MAX_NEURON_MON_GRP_SZIE entries
				// per group (NOTE: the macro name carries a "SZIE" typo; defined elsewhere)
				int idxBase = networkConfigs[netId].numGroups * MAX_NEURON_MON_GRP_SZIE * t + lGrpId * MAX_NEURON_MON_GRP_SZIE;
				v = managerRuntimeData.nVBuffer[idxBase + nId];
				u = managerRuntimeData.nUBuffer[idxBase + nId];
				I = managerRuntimeData.nIBuffer[idxBase + nId];

				//printf("Voltage recorded is: %f\n", v);

				// current time is last completed second plus whatever is leftover in t
				int time = currentTimeSec * 1000 + t;

				//KERNEL_INFO("t: %d -- time: %d --base: %d -- nId: %d -- v: %f -- u: %f, --I: %f", t, time, idxBase + nId, nId, v, u, I);

				// WRITE TO A TEXT FILE INSTEAD OF BINARY
				if (writeNeuronStateToFile) {
					//KERNEL_INFO("Save to file");
					// binary record: time, nId (ints), then v, u, I (floats)
					int cnt;
					cnt = fwrite(&time, sizeof(int), 1, nrnFileId); assert(cnt == 1);
					cnt = fwrite(&nId, sizeof(int), 1, nrnFileId); assert(cnt == 1);
					cnt = fwrite(&v, sizeof(float), 1, nrnFileId); assert(cnt == 1);
					cnt = fwrite(&u, sizeof(float), 1, nrnFileId); assert(cnt == 1);
					cnt = fwrite(&I, sizeof(float), 1, nrnFileId); assert(cnt == 1);
				}

				if (writeNeuronStateToArray) {
					//KERNEL_INFO("Save to array");
					nrnMonObj->pushNeuronState(nId, v, u, I);
				}
			}
		}

		if (nrnFileId != NULL) // flush neuron state file
			fflush(nrnFileId);
	}
}
6715 
6716 // FIXME: update summary format for multiGPUs
6717 void SNN::printSimSummary() {
6718  float etime;
6719 
6720  // FIXME: measure total execution time, and GPU excution time
6721  stopTiming();
6722  etime = executionTime;
6723 
6724  fetchNetworkSpikeCount();
6725 
6726  KERNEL_INFO("\n");
6727  KERNEL_INFO("******************** Simulation Summary ***************************");
6728 
6729  KERNEL_INFO("Network Parameters: \tnumNeurons = %d (numNExcReg:numNInhReg = %2.1f:%2.1f)",
6730  glbNetworkConfig.numN, 100.0 * glbNetworkConfig.numNExcReg / glbNetworkConfig.numN, 100.0 * glbNetworkConfig.numNInhReg / glbNetworkConfig.numN);
6731  KERNEL_INFO("\t\t\tnumSynapses = %d", glbNetworkConfig.numSynNet);
6732  KERNEL_INFO("\t\t\tmaxDelay = %d", glbNetworkConfig.maxDelay);
6733  KERNEL_INFO("Simulation Mode:\t%s",sim_with_conductances?"COBA":"CUBA");
6734  KERNEL_INFO("Random Seed:\t\t%d", randSeed_);
6735  KERNEL_INFO("Timing:\t\t\tModel Simulation Time = %lld sec", (unsigned long long)simTimeSec);
6736  KERNEL_INFO("\t\t\tActual Execution Time = %4.2f sec", etime/1000.0f);
6737  KERNEL_INFO("Average Firing Rate:\t2+ms delay = %3.3f Hz",
6738  glbNetworkConfig.numN2msDelay > 0 ? managerRuntimeData.spikeCountD2 / (1.0 * simTimeSec * glbNetworkConfig.numN2msDelay) : 0.0f);
6739  KERNEL_INFO("\t\t\t1ms delay = %3.3f Hz",
6740  glbNetworkConfig.numN1msDelay > 0 ? managerRuntimeData.spikeCountD1 / (1.0 * simTimeSec * glbNetworkConfig.numN1msDelay) : 0.0f);
6741  KERNEL_INFO("\t\t\tOverall = %3.3f Hz", managerRuntimeData.spikeCount / (1.0 * simTimeSec * glbNetworkConfig.numN));
6742  KERNEL_INFO("Overall Spike Count Transferred:");
6743  KERNEL_INFO("\t\t\t2+ms delay = %d", managerRuntimeData.spikeCountExtRxD2);
6744  KERNEL_INFO("\t\t\t1ms delay = %d", managerRuntimeData.spikeCountExtRxD1);
6745  KERNEL_INFO("Overall Spike Count:\t2+ms delay = %d", managerRuntimeData.spikeCountD2);
6746  KERNEL_INFO("\t\t\t1ms delay = %d", managerRuntimeData.spikeCountD1);
6747  KERNEL_INFO("\t\t\tTotal = %d", managerRuntimeData.spikeCount);
6748  KERNEL_INFO("*********************************************************************************\n");
6749 }
6750 
6751 //------------------------------ legacy code --------------------------------//
6752 
6753 // We clean up the postSynapticIds array in parallel, compacting the store to minimize any wastage in that array
6754 // Appropriate alignment specified by ALIGN_COMPACTION macro is used to ensure some level of alignment (if necessary)
6755 //void SNN::compactConnections() {
6756 // unsigned int* tmp_cumulativePost = new unsigned int[numN];
6757 // unsigned int* tmp_cumulativePre = new unsigned int[numN];
6758 // unsigned int lastCnt_pre = 0;
6759 // unsigned int lastCnt_post = 0;
6760 //
6761 // tmp_cumulativePost[0] = 0;
6762 // tmp_cumulativePre[0] = 0;
6763 //
6764 // for(int i=1; i < numN; i++) {
6765 // lastCnt_post = tmp_cumulativePost[i-1]+managerRuntimeData.Npost[i-1]; //position of last pointer
6766 // lastCnt_pre = tmp_cumulativePre[i-1]+managerRuntimeData.Npre[i-1]; //position of last pointer
6767 // #if COMPACTION_ALIGNMENT_POST
6768 // lastCnt_post= lastCnt_post + COMPACTION_ALIGNMENT_POST-lastCnt_post%COMPACTION_ALIGNMENT_POST;
6769 // lastCnt_pre = lastCnt_pre + COMPACTION_ALIGNMENT_PRE- lastCnt_pre%COMPACTION_ALIGNMENT_PRE;
6770 // #endif
6771 // tmp_cumulativePost[i] = lastCnt_post;
6772 // tmp_cumulativePre[i] = lastCnt_pre;
6773 // assert(tmp_cumulativePost[i] <= managerRuntimeData.cumulativePost[i]);
6774 // assert(tmp_cumulativePre[i] <= managerRuntimeData.cumulativePre[i]);
6775 // }
6776 //
6777 // // compress the post_synaptic array according to the new values of the tmp_cumulative counts....
6778 // unsigned int tmp_numPostSynNet = tmp_cumulativePost[numN-1]+managerRuntimeData.Npost[numN-1];
6779 // unsigned int tmp_numPreSynNet = tmp_cumulativePre[numN-1]+managerRuntimeData.Npre[numN-1];
6780 // assert(tmp_numPostSynNet <= allocatedPost);
6781 // assert(tmp_numPreSynNet <= allocatedPre);
6782 // assert(tmp_numPostSynNet <= numPostSynNet);
6783 // assert(tmp_numPreSynNet <= numPreSynNet);
6784 // KERNEL_DEBUG("******************");
6785 // KERNEL_DEBUG("CompactConnection: ");
6786 // KERNEL_DEBUG("******************");
6787 // KERNEL_DEBUG("old_postCnt = %d, new_postCnt = %d", numPostSynNet, tmp_numPostSynNet);
6788 // KERNEL_DEBUG("old_preCnt = %d, new_postCnt = %d", numPreSynNet, tmp_numPreSynNet);
6789 //
6790 // // new buffer with required size + 100 bytes of additional space just to provide limited overflow
6791 // SynInfo* tmp_postSynapticIds = new SynInfo[tmp_numPostSynNet+100];
6792 //
6793 // // new buffer with required size + 100 bytes of additional space just to provide limited overflow
6794 // SynInfo* tmp_preSynapticIds = new SynInfo[tmp_numPreSynNet+100];
6795 // float* tmp_wt = new float[tmp_numPreSynNet+100];
6796 // float* tmp_maxSynWt = new float[tmp_numPreSynNet+100];
6797 // short int *tmp_cumConnIdPre = new short int[tmp_numPreSynNet+100];
6798 // float *tmp_mulSynFast = new float[numConnections];
6799 // float *tmp_mulSynSlow = new float[numConnections];
6800 //
6801 // // compact synaptic information
6802 // for(int i=0; i<numN; i++) {
6803 // assert(tmp_cumulativePost[i] <= managerRuntimeData.cumulativePost[i]);
6804 // assert(tmp_cumulativePre[i] <= managerRuntimeData.cumulativePre[i]);
6805 // for( int j=0; j<managerRuntimeData.Npost[i]; j++) {
6806 // unsigned int tmpPos = tmp_cumulativePost[i]+j;
6807 // unsigned int oldPos = managerRuntimeData.cumulativePost[i]+j;
6808 // tmp_postSynapticIds[tmpPos] = managerRuntimeData.postSynapticIds[oldPos];
6809 // tmp_SynapticDelay[tmpPos] = tmp_SynapticDelay[oldPos];
6810 // }
6811 // for( int j=0; j<managerRuntimeData.Npre[i]; j++) {
6812 // unsigned int tmpPos = tmp_cumulativePre[i]+j;
6813 // unsigned int oldPos = managerRuntimeData.cumulativePre[i]+j;
6814 // tmp_preSynapticIds[tmpPos] = managerRuntimeData.preSynapticIds[oldPos];
6815 // tmp_maxSynWt[tmpPos] = managerRuntimeData.maxSynWt[oldPos];
6816 // tmp_wt[tmpPos] = managerRuntimeData.wt[oldPos];
6817 // tmp_cumConnIdPre[tmpPos] = managerRuntimeData.connIdsPreIdx[oldPos];
6818 // }
6819 // }
6820 //
6821 // // delete old buffer space
6822 // delete[] managerRuntimeData.postSynapticIds;
6823 // managerRuntimeData.postSynapticIds = tmp_postSynapticIds;
6824 // cpuSnnSz.networkInfoSize -= (sizeof(SynInfo)*numPostSynNet);
6825 // cpuSnnSz.networkInfoSize += (sizeof(SynInfo)*(tmp_numPostSynNet+100));
6826 //
6827 // delete[] managerRuntimeData.cumulativePost;
6828 // managerRuntimeData.cumulativePost = tmp_cumulativePost;
6829 //
6830 // delete[] managerRuntimeData.cumulativePre;
6831 // managerRuntimeData.cumulativePre = tmp_cumulativePre;
6832 //
6833 // delete[] managerRuntimeData.maxSynWt;
6834 // managerRuntimeData.maxSynWt = tmp_maxSynWt;
6835 // cpuSnnSz.synapticInfoSize -= (sizeof(float)*numPreSynNet);
6836 // cpuSnnSz.synapticInfoSize += (sizeof(float)*(tmp_numPreSynNet+100));
6837 //
6838 // delete[] managerRuntimeData.wt;
6839 // managerRuntimeData.wt = tmp_wt;
6840 // cpuSnnSz.synapticInfoSize -= (sizeof(float)*numPreSynNet);
6841 // cpuSnnSz.synapticInfoSize += (sizeof(float)*(tmp_numPreSynNet+100));
6842 //
6843 // delete[] managerRuntimeData.connIdsPreIdx;
6844 // managerRuntimeData.connIdsPreIdx = tmp_cumConnIdPre;
6845 // cpuSnnSz.synapticInfoSize -= (sizeof(short int)*numPreSynNet);
6846 // cpuSnnSz.synapticInfoSize += (sizeof(short int)*(tmp_numPreSynNet+100));
6847 //
6848 // // compact connection-centric information
6849 // for (int i=0; i<numConnections; i++) {
6850 // tmp_mulSynFast[i] = mulSynFast[i];
6851 // tmp_mulSynSlow[i] = mulSynSlow[i];
6852 // }
6853 // delete[] mulSynFast;
6854 // delete[] mulSynSlow;
6855 // mulSynFast = tmp_mulSynFast;
6856 // mulSynSlow = tmp_mulSynSlow;
6857 // cpuSnnSz.networkInfoSize -= (2*sizeof(uint8_t)*numPreSynNet);
6858 // cpuSnnSz.networkInfoSize += (2*sizeof(uint8_t)*(tmp_numPreSynNet+100));
6859 //
6860 //
6861 // delete[] managerRuntimeData.preSynapticIds;
6862 // managerRuntimeData.preSynapticIds = tmp_preSynapticIds;
6863 // cpuSnnSz.synapticInfoSize -= (sizeof(SynInfo)*numPreSynNet);
6864 // cpuSnnSz.synapticInfoSize += (sizeof(SynInfo)*(tmp_numPreSynNet+100));
6865 //
6866 // numPreSynNet = tmp_numPreSynNet;
6867 // numPostSynNet = tmp_numPostSynNet;
6868 //}
6869 
6870 //The post synaptic connections are sorted based on delay here so that we can reduce storage requirement
6871 //and generation of spike at the post-synaptic side.
6872 //We also create the delay_info array has the delay_start and delay_length parameter
6873 //void SNN::reorganizeDelay()
6874 //{
6875 // for(int grpId=0; grpId < numGroups; grpId++) {
6876 // for(int nid=groupConfigs[0][grpId].StartN; nid <= groupConfigs[0][grpId].EndN; nid++) {
6877 // unsigned int jPos=0; // this points to the top of the delay queue
6878 // unsigned int cumN=managerRuntimeData.cumulativePost[nid]; // cumulativePost[] is unsigned int
6879 // unsigned int cumDelayStart=0; // Npost[] is unsigned short
6880 // for(int td = 0; td < maxDelay_; td++) {
6881 // unsigned int j=jPos; // start searching from top of the queue until the end
6882 // unsigned int cnt=0; // store the number of nodes with a delay of td;
6883 // while(j < managerRuntimeData.Npost[nid]) {
6884 // // found a node j with delay=td and we put
6885 // // the delay value = 1 at array location td=0;
6886 // if(td==(tmp_SynapticDelay[cumN+j]-1)) {
6887 // assert(jPos<managerRuntimeData.Npost[nid]);
6888 // swapConnections(nid, j, jPos);
6889 //
6890 // jPos=jPos+1;
6891 // cnt=cnt+1;
6892 // }
6893 // j=j+1;
6894 // }
6895 //
6896 // // update the delay_length and start values...
6897 // managerRuntimeData.postDelayInfo[nid*(maxDelay_+1)+td].delay_length = cnt;
6898 // managerRuntimeData.postDelayInfo[nid*(maxDelay_+1)+td].delay_index_start = cumDelayStart;
6899 // cumDelayStart += cnt;
6900 //
6901 // assert(cumDelayStart <= managerRuntimeData.Npost[nid]);
6902 // }
6903 //
6904 // // total cumulative delay should be equal to number of post-synaptic connections at the end of the loop
6905 // assert(cumDelayStart == managerRuntimeData.Npost[nid]);
6906 // for(unsigned int j=1; j < managerRuntimeData.Npost[nid]; j++) {
6907 // unsigned int cumN=managerRuntimeData.cumulativePost[nid]; // cumulativePost[] is unsigned int
6908 // if( tmp_SynapticDelay[cumN+j] < tmp_SynapticDelay[cumN+j-1]) {
6909 // KERNEL_ERROR("Post-synaptic delays not sorted correctly... id=%d, delay[%d]=%d, delay[%d]=%d",
6910 // nid, j, tmp_SynapticDelay[cumN+j], j-1, tmp_SynapticDelay[cumN+j-1]);
6911 // assert( tmp_SynapticDelay[cumN+j] >= tmp_SynapticDelay[cumN+j-1]);
6912 // }
6913 // }
6914 // }
6915 // }
6916 //}
6917 
6918 //void SNN::swapConnections(int nid, int oldPos, int newPos) {
6919 // unsigned int cumN=managerRuntimeData.cumulativePost[nid];
6920 //
6921 // // Put the node oldPos to the top of the delay queue
6922 // SynInfo tmp = managerRuntimeData.postSynapticIds[cumN+oldPos];
6923 // managerRuntimeData.postSynapticIds[cumN+oldPos]= managerRuntimeData.postSynapticIds[cumN+newPos];
6924 // managerRuntimeData.postSynapticIds[cumN+newPos]= tmp;
6925 //
6926 // // Ensure that you have shifted the delay accordingly....
6927 // uint8_t tmp_delay = tmp_SynapticDelay[cumN+oldPos];
6928 // tmp_SynapticDelay[cumN+oldPos] = tmp_SynapticDelay[cumN+newPos];
6929 // tmp_SynapticDelay[cumN+newPos] = tmp_delay;
6930 //
6931 // // update the pre-information for the postsynaptic neuron at the position oldPos.
6932 // SynInfo postInfo = managerRuntimeData.postSynapticIds[cumN+oldPos];
6933 // int post_nid = GET_CONN_NEURON_ID(postInfo);
6934 // int post_sid = GET_CONN_SYN_ID(postInfo);
6935 //
6936 // SynInfo* preId = &(managerRuntimeData.preSynapticIds[managerRuntimeData.cumulativePre[post_nid]+post_sid]);
6937 // int pre_nid = GET_CONN_NEURON_ID((*preId));
6938 // int pre_sid = GET_CONN_SYN_ID((*preId));
6939 // int pre_gid = GET_CONN_GRP_ID((*preId));
6940 // assert (pre_nid == nid);
6941 // assert (pre_sid == newPos);
6942 // *preId = SET_CONN_ID( pre_nid, oldPos, pre_gid);
6943 //
6944 // // update the pre-information for the postsynaptic neuron at the position newPos
6945 // postInfo = managerRuntimeData.postSynapticIds[cumN+newPos];
6946 // post_nid = GET_CONN_NEURON_ID(postInfo);
6947 // post_sid = GET_CONN_SYN_ID(postInfo);
6948 //
6949 // preId = &(managerRuntimeData.preSynapticIds[managerRuntimeData.cumulativePre[post_nid]+post_sid]);
6950 // pre_nid = GET_CONN_NEURON_ID((*preId));
6951 // pre_sid = GET_CONN_SYN_ID((*preId));
6952 // pre_gid = GET_CONN_GRP_ID((*preId));
6953 // assert (pre_nid == nid);
6954 // assert (pre_sid == oldPos);
6955 // *preId = SET_CONN_ID( pre_nid, newPos, pre_gid);
6956 //}
6957 
6958 // set one specific connection from neuron id 'src' to neuron id 'dest'
6959 //inline void SNN::setConnection(int srcGrp, int destGrp, unsigned int src, unsigned int dest, float synWt,
6960 // float maxWt, uint8_t dVal, int connProp, short int connId) {
6961 // assert(dest<=CONN_SYN_NEURON_MASK); // total number of neurons is less than 1 million within a GPU
6962 // assert((dVal >=1) && (dVal <= maxDelay_));
6963 //
6964 // // adjust sign of weight based on pre-group (negative if pre is inhibitory)
6965 // synWt = isExcitatoryGroup(srcGrp) ? fabs(synWt) : -1.0*fabs(synWt);
6966 // maxWt = isExcitatoryGroup(srcGrp) ? fabs(maxWt) : -1.0*fabs(maxWt);
6967 //
6968 // // we have exceeded the number of possible connection for one neuron
6969 // if(managerRuntimeData.Npost[src] >= groupConfigs[0][srcGrp].numPostSynapses) {
6970 // KERNEL_ERROR("setConnection(%d (Grp=%s), %d (Grp=%s), %f, %d)", src, groupInfo[srcGrp].Name.c_str(),
6971 // dest, groupInfo[destGrp].Name.c_str(), synWt, dVal);
6972 // KERNEL_ERROR("Large number of postsynaptic connections established (%d), max for this group %d.", managerRuntimeData.Npost[src], groupConfigs[0][srcGrp].numPostSynapses);
6973 // exitSimulation(1);
6974 // }
6975 //
6976 // if(managerRuntimeData.Npre[dest] >= groupConfigs[0][destGrp].numPreSynapses) {
6977 // KERNEL_ERROR("setConnection(%d (Grp=%s), %d (Grp=%s), %f, %d)", src, groupInfo[srcGrp].Name.c_str(),
6978 // dest, groupInfo[destGrp].Name.c_str(), synWt, dVal);
6979 // KERNEL_ERROR("Large number of presynaptic connections established (%d), max for this group %d.", managerRuntimeData.Npre[dest], groupConfigs[0][destGrp].numPreSynapses);
6980 // exitSimulation(1);
6981 // }
6982 //
6983 // int p = managerRuntimeData.Npost[src];
6984 //
6985 // assert(managerRuntimeData.Npost[src] >= 0);
6986 // assert(managerRuntimeData.Npre[dest] >= 0);
6987 // assert((src * maxNumPostSynGrp + p) / numN < maxNumPostSynGrp); // divide by numN to prevent INT overflow
6988 //
6989 // unsigned int post_pos = managerRuntimeData.cumulativePost[src] + managerRuntimeData.Npost[src];
6990 // unsigned int pre_pos = managerRuntimeData.cumulativePre[dest] + managerRuntimeData.Npre[dest];
6991 //
6992 // assert(post_pos < numPostSynNet);
6993 // assert(pre_pos < numPreSynNet);
6994 //
6995 // //generate a new postSynapticIds id for the current connection
6996 // managerRuntimeData.postSynapticIds[post_pos] = SET_CONN_ID(dest, managerRuntimeData.Npre[dest], destGrp);
6997 // tmp_SynapticDelay[post_pos] = dVal;
6998 //
6999 // managerRuntimeData.preSynapticIds[pre_pos] = SET_CONN_ID(src, managerRuntimeData.Npost[src], srcGrp);
7000 // managerRuntimeData.wt[pre_pos] = synWt;
7001 // managerRuntimeData.maxSynWt[pre_pos] = maxWt;
7002 // managerRuntimeData.connIdsPreIdx[pre_pos] = connId;
7003 //
7004 // bool synWtType = GET_FIXED_PLASTIC(connProp);
7005 //
7006 // if (synWtType == SYN_PLASTIC) {
7007 // sim_with_fixedwts = false; // if network has any plastic synapses at all, this will be set to true
7008 // managerRuntimeData.Npre_plastic[dest]++;
7009 // // homeostasis
7010 // if (groupConfigs[0][destGrp].WithHomeostasis && groupConfigs[0][destGrp].homeoId ==-1)
7011 // groupConfigs[0][destGrp].homeoId = dest; // this neuron info will be printed
7012 // }
7013 //
7014 // managerRuntimeData.Npre[dest] += 1;
7015 // managerRuntimeData.Npost[src] += 1;
7016 //
7017 // groupInfo[srcGrp].numPostConn++;
7018 // groupInfo[destGrp].numPreConn++;
7019 //
7020 // if (managerRuntimeData.Npost[src] > groupInfo[srcGrp].maxPostConn)
7021 // groupInfo[srcGrp].maxPostConn = managerRuntimeData.Npost[src];
7022 // if (managerRuntimeData.Npre[dest] > groupInfo[destGrp].maxPreConn)
7023 // groupInfo[destGrp].maxPreConn = managerRuntimeData.Npre[src];
7024 //}
SpikeBuffer::step
void step()
advance to next time step
Definition: spike_buffer.cpp:220
GroupMonitorCore::setLastUpdated
void setLastUpdated(unsigned int lastUpdate)
sets timestamp of last GroupMonitor update
Definition: group_monitor_core.h:146
GPU_MODE
@ GPU_MODE
model is run on GPU card(s)
Definition: carlsim_datastructures.h:115
SpikeBuffer::SpikeNode::neurId
int neurId
corresponding global neuron Id
Definition: spike_buffer.h:90
INTERVAL_100MS
@ INTERVAL_100MS
the update interval will be 100 ms, which is 10Hz update frequency
Definition: carlsim_datastructures.h:239
NetworkConfigRT_s::wtChangeDecay
float wtChangeDecay
the wtChange decay
Definition: snn_datastructures.h:676
compareDelay
bool compareDelay(const ConnectionInfo &first, const ConnectionInfo &second)
Definition: snn_manager.cpp:3219
ThreadStruct_s::endIdx
int endIdx
Definition: snn_datastructures.h:727
RuntimeData_s::gNMDA
float * gNMDA
conductance of gNMDA
Definition: snn_datastructures.h:494
SpikeGeneratorCore::nextSpikeTime
virtual int nextSpikeTime(SNN *s, int grpId, int i, int currentTime, int lastScheduledSpikeTime, int endOfTimeSlice)
controls spike generation using a callback mechanism
Definition: callback_core.cpp:61
RuntimeData_s::Izh_d
float * Izh_d
Definition: snn_datastructures.h:475
SNN::getNeuronMonitor
NeuronMonitor * getNeuronMonitor(int grpId)
Returns pointer to existing NeuronMonitor object, NULL else.
Definition: snn_manager.cpp:1980
ConnectionInfo_s::delay
uint8_t delay
Definition: snn_datastructures.h:104
GroupSTDPInfo_s::TAU_MINUS_INV_EXC
float TAU_MINUS_INV_EXC
the inverse of time constant minus, if the exponential or timing-based E-STDP curve is used
Definition: carlsim_datastructures.h:420
GroupConfigRT_s::WithISTDPcurve
STDPCurve WithISTDPcurve
published by GroupConfig
Definition: snn_datastructures.h:396
SNN::getSimTimeSec
int getSimTimeSec()
Definition: snn.h:581
POISSON_NEURON
#define POISSON_NEURON
Definition: carlsim_definitions.h:65
NetworkConfigRT_s::stdpScaleFactor
float stdpScaleFactor
Definition: snn_datastructures.h:675
Grid3D::offsetX
float offsetX
Definition: carlsim_datastructures.h:549
SNN::getGroupNumNeurons
int getGroupNumNeurons(int gGrpId)
Definition: snn.h:558
RuntimeData_s::totalCurrent
float * totalCurrent
Definition: snn_datastructures.h:477
GroupSTDPInfo_s::WithISTDPtype
STDPType WithISTDPtype
the type of I-STDP (STANDARD or DA_MOD)
Definition: carlsim_datastructures.h:416
SNN::setGroupMonitor
GroupMonitor * setGroupMonitor(int grpId, FILE *fid)
sets up a group monitor registered with a callback to process the spikes.
Definition: snn_manager.cpp:1087
SNN::setConductances
void setConductances(bool isSet, int tdAMPA, int trNMDA, int tdNMDA, int tdGABAa, int trGABAb, int tdGABAb)
Sets custom values for conductance decay (\tau_decay) or disables conductances altogether These will...
Definition: snn_manager.cpp:408
TARGET_GABAb
#define TARGET_GABAb
Definition: carlsim_definitions.h:69
RuntimeData_s::spikeCountExtRxD2
unsigned int spikeCountExtRxD2
the number of external spikes with axonal delay > 1 in a simulation, used in CPU_MODE currently
Definition: snn_datastructures.h:461
ConnectConfig_s::mulSynFast
float mulSynFast
factor to be applied to either gAMPA or gGABAa
Definition: snn_datastructures.h:128
SpikeMonitorCore::getAccumTime
long int getAccumTime()
returns the total accumulated time
Definition: spike_monitor_core.cpp:483
RuntimeData_s::grpNEBuffer
float * grpNEBuffer
Definition: snn_datastructures.h:580
STP_BUF_POS
#define STP_BUF_POS(nid, t, maxDelay)
Definition: snn_definitions.h:102
SynInfo_s
Definition: snn_datastructures.h:89
SimMode
SimMode
simulation mode
Definition: carlsim_datastructures.h:113
NetworkConfigRT_s::maxSpikesD2
unsigned int maxSpikesD2
the estimated maximum number of spikes with delay >= 2 in a network
Definition: snn_datastructures.h:651
integrationMethod_t
integrationMethod_t
Integration methods.
Definition: carlsim_datastructures.h:132
RuntimeData_s::lif_tau_ref
int * lif_tau_ref
Definition: snn_datastructures.h:481
AER
@ AER
mode in which spike information is collected in AER format
Definition: carlsim_datastructures.h:202
TIMING_COUNT
#define TIMING_COUNT
Definition: snn_definitions.h:155
UNKNOWN_LOGGER_ERROR
#define UNKNOWN_LOGGER_ERROR
Definition: error_code.h:93
GroupConfig_s::grid
Grid3D grid
Definition: snn_datastructures.h:320
connection_monitor_core.h
IS_EXCITATORY_TYPE
#define IS_EXCITATORY_TYPE(type)
Definition: carlsim_definitions.h:81
SpikeMonitorCore
Definition: spike_monitor_core.h:70
GroupSTDPInfo_s::TAU_PLUS_INV_EXC
float TAU_PLUS_INV_EXC
the inverse of time constant plus, if the exponential or timing-based E-STDP curve is used
Definition: carlsim_datastructures.h:419
SNN::loadSimulation
void loadSimulation(FILE *fid)
Definition: snn_manager.cpp:1000
GroupNeuromodulatorInfo_s::baseDP
float baseDP
baseline concentration of Dopamine
Definition: carlsim_datastructures.h:444
GroupConfigRT_s::decayDP
float decayDP
decay rate for Dopamine, published by GroupConfig
Definition: snn_datastructures.h:434
DelayInfo_s
Definition: snn_datastructures.h:84
SNN::getNumGroups
int getNumGroups()
Definition: snn.h:568
SNN::setNeuronParametersLIF
void setNeuronParametersLIF(int grpId, int tau_m, int tau_ref, float vTh, float vReset, double minRmem, double maxRmem)
Sets neuron parameters for a group of LIF spiking neurons.
Definition: snn_manager.cpp:574
SET_FIXED_PLASTIC
#define SET_FIXED_PLASTIC(a)
Definition: snn_definitions.h:201
CONN_FULL
@ CONN_FULL
Definition: snn_datastructures.h:74
RuntimeData_s::lif_tau_ref_c
int * lif_tau_ref_c
Definition: snn_datastructures.h:482
SNN::biasWeights
void biasWeights(short int connId, float bias, bool updateWeightRange=false)
Definition: snn_manager.cpp:915
RuntimeData_s::timeTableD2
unsigned int * timeTableD2
firing table, only used in CPU_MODE currently
Definition: snn_datastructures.h:543
RangeWeight
a range struct for synaptic weight magnitudes
Definition: carlsim_datastructures.h:311
NetworkConfigRT_s::dAMPA
double dAMPA
multiplication factor for decay time of AMPA conductance (gAMPA[i] *= dAMPA)
Definition: snn_datastructures.h:681
spike_monitor.h
SpikeBuffer::schedule
void schedule(int neurId, int grpId, unsigned short int delay)
Schedule a spike.
Definition: spike_buffer.cpp:219
SpikeMonitor
Class SpikeMonitor.
Definition: spike_monitor.h:119
SNN::connect
short int connect(int gIDpre, int gIDpost, const std::string &_type, float initWt, float maxWt, float prob, uint8_t minDelay, uint8_t maxDelay, RadiusRF radius, float mulSynFast, float mulSynSlow, bool synWtType)
makes connections from each neuron in grpId1 to 'numPostSynapses' neurons in grpId2
Definition: snn_manager.cpp:95
NetworkConfigRT_s::rNMDA
double rNMDA
multiplication factor for rise time of NMDA
Definition: snn_datastructures.h:682
GroupConfigRT_s::LAMBDA
float LAMBDA
published by GroupConfig
Definition: snn_datastructures.h:420
NetworkConfigRT_s::sGABAb
double sGABAb
scaling factor for GABAb amplitude
Definition: snn_datastructures.h:688
SNN::isSimulationWithGABAbRise
bool isSimulationWithGABAbRise()
Definition: snn.h:636
SNN::saveSimulation
void saveSimulation(FILE *fid, bool saveSynapseInfo=false)
stores the pre and post synaptic neuron ids with the weight and delay
Definition: snn_manager.cpp:1410
RuntimeData_s::wtChange
float * wtChange
stores the weight change of a synaptic connection
Definition: snn_datastructures.h:522
MAX_CONN_PER_SNN
#define MAX_CONN_PER_SNN
Definition: snn_definitions.h:131
RuntimeData_s::Izh_vr
float * Izh_vr
Definition: snn_datastructures.h:469
Grid3D::offsetZ
float offsetZ
Definition: carlsim_datastructures.h:549
GroupSTDPInfo_s::BETA_LTD
float BETA_LTD
the amplitude of inhibitory LTD if the pulse I-STDP curve is used
Definition: carlsim_datastructures.h:429
MAX_GRP_PER_SNN
#define MAX_GRP_PER_SNN
Definition: snn_definitions.h:132
GroupConfigRT_s::WithESTDP
bool WithESTDP
published by GroupConfig
Definition: snn_datastructures.h:391
GlobalNetworkConfig_s::numN2msDelay
int numN2msDelay
number of neurons with maximum out going axonal delay >= 2 ms
Definition: snn_datastructures.h:612
GroupConfigRT_s::WithESTDPtype
STDPType WithESTDPtype
published by GroupConfig
Definition: snn_datastructures.h:393
RuntimeData_s
Definition: snn_datastructures.h:450
NetworkConfigRT_s::dNMDA
double dNMDA
multiplication factor for decay time of NMDA
Definition: snn_datastructures.h:683
SNN::getConductanceGABAa
std::vector< float > getConductanceGABAa(int grpId)
Definition: snn_manager.cpp:1760
ConnectConfig_s::initWt
float initWt
Definition: snn_datastructures.h:125
SNN::getGroupSTDPInfo
GroupSTDPInfo getGroupSTDPInfo(int grpId)
Definition: snn_manager.cpp:1873
RadiusRF::radX
double radX
Definition: carlsim_datastructures.h:372
Grid3D::distX
float distX
Definition: carlsim_datastructures.h:548
GroupSTDPInfo_s::WithSTDP
bool WithSTDP
enable STDP flag
Definition: carlsim_datastructures.h:412
compConnectConfig_s::grpDest
int grpDest
Definition: snn_datastructures.h:180
SNN::setSpikeGenerator
void setSpikeGenerator(int grpId, SpikeGeneratorCore *spikeGenFunc)
sets up a spike generator
Definition: snn_manager.cpp:1173
SNN::isPoint3DinRF
bool isPoint3DinRF(const RadiusRF &radius, const Point3D &pre, const Point3D &post)
Definition: snn_manager.cpp:5061
RuntimeData_s::nextVoltage
float * nextVoltage
membrane potential buffer (next/future time step) for each regular neuron
Definition: snn_datastructures.h:465
GroupSTDPInfo_s::ALPHA_MINUS_EXC
float ALPHA_MINUS_EXC
the amplitude of alpha minus, if the exponential or timing-based E-STDP curve is used
Definition: carlsim_datastructures.h:422
NetworkConfigRT_s::numNSpikeGen
int numNSpikeGen
number of poisson neurons generating spikes based on callback functions
Definition: snn_datastructures.h:638
RuntimeData_s::baseFiring
float * baseFiring
Definition: snn_datastructures.h:565
GroupConfigRT_s::ALPHA_PLUS_INB
float ALPHA_PLUS_INB
published by GroupConfig
Definition: snn_datastructures.h:416
EXP_CURVE
@ EXP_CURVE
standard exponential curve
Definition: carlsim_datastructures.h:178
COMPILED_SNN
@ COMPILED_SNN
Definition: snn_datastructures.h:79
ConnectConfig_s::grpSrc
int grpSrc
Definition: snn_datastructures.h:120
SNN::runNetwork
int runNetwork(int _nsec, int _nmsec, bool printRunSummary)
run the simulation for n sec
Definition: snn_manager.cpp:793
SNN::setWeightAndWeightChangeUpdate
void setWeightAndWeightChangeUpdate(UpdateInterval wtANDwtChangeUpdateInterval, bool enableWtChangeDecay, float wtChangeDecay)
Sets the weight and weight change update parameters.
Definition: snn_manager.cpp:723
CONN_UNKNOWN
@ CONN_UNKNOWN
Definition: snn_datastructures.h:74
NetworkConfigRT_s::dGABAa
double dGABAa
multiplication factor for decay time of GABAa
Definition: snn_datastructures.h:685
GroupSTDPInfo_s::TAU_MINUS_INV_INB
float TAU_MINUS_INV_INB
the inverse of tau minus, if the exponential I-STDP curve is used
Definition: carlsim_datastructures.h:424
RuntimeData_s::stpx
float * stpx
Definition: snn_datastructures.h:511
GroupSTDPInfo_s::ALPHA_PLUS_EXC
float ALPHA_PLUS_EXC
the amplitude of alpha plus, if the exponential or timing-based E-STDP curve is used
Definition: carlsim_datastructures.h:421
SNN::getNeuronMonitorCore
NeuronMonitorCore * getNeuronMonitorCore(int grpId)
Definition: snn_manager.cpp:1991
NetworkConfigRT_s::rGABAb
double rGABAb
multiplication factor for rise time of GABAb
Definition: snn_datastructures.h:686
RuntimeData_s::Izh_vpeak
float * Izh_vpeak
Definition: snn_datastructures.h:471
ConnectionMonitorCore
Definition: connection_monitor_core.h:67
NetworkConfigRT_s::sim_with_GABAb_rise
bool sim_with_GABAb_rise
a flag to inform whether to compute GABAb rise time
Definition: snn_datastructures.h:680
group_monitor_core.h
SpikeGeneratorCore
used for relaying callback to SpikeGenerator
Definition: callback_core.h:69
CHECK_CONNECTION_ID
#define CHECK_CONNECTION_ID(n, total)
< Used for in the function getConnectionId
Definition: snn_definitions.h:87
GroupConfigRT_s::TAU_PLUS_INV_EXC
float TAU_PLUS_INV_EXC
published by GroupConfig
Definition: snn_datastructures.h:407
SNN::updateGroupMonitor
void updateGroupMonitor(int grpId=ALL)
access group status (currently the concentration of neuromodulator)
Definition: snn_manager.cpp:6279
SNN::getSpikeMonitorCore
SpikeMonitorCore * getSpikeMonitorCore(int grpId)
Definition: snn_manager.cpp:1969
SNN::isConnectionPlastic
bool isConnectionPlastic(short int connId)
returns whether synapses in connection are fixed (false) or plastic (true)
Definition: snn_manager.cpp:4883
NeuronMonitorCore::getLastUpdated
long int getLastUpdated()
returns timestamp of last NeuronMonitor update
Definition: neuron_monitor_core.h:93
RuntimeData_s::Izh_b
float * Izh_b
Definition: snn_datastructures.h:473
SNN::getRFDist3D
double getRFDist3D(const RadiusRF &radius, const Point3D &pre, const Point3D &post)
checks whether a point pre lies in the receptive field for point post
Definition: snn_manager.cpp:5070
GroupConfigRT_s::numPostSynapses
int numPostSynapses
the total number of post-connections of a group, published by GroupConfigMD
Definition: snn_datastructures.h:385
RuntimeData_s::spikeCount
unsigned int spikeCount
the total number of spikes in a simulation, used in CPU_MODE currently
Definition: snn_datastructures.h:456
ConnectionInfo_s::grpDest
int grpDest
Definition: snn_datastructures.h:96
RuntimeData_s::spikeCountD2Sec
unsigned int spikeCountD2Sec
the total number of spikes with axonal delay > 1 in 1 second, used in CPU_MODE currently
Definition: snn_datastructures.h:453
SNN::setExternalCurrent
void setExternalCurrent(int grpId, const std::vector< float > &current)
injects current (mA) into the soma of every neuron in the group
Definition: snn_manager.cpp:1377
NeuronMonitorCore::pushNeuronState
void pushNeuronState(int neurId, float V, float U, float I)
inserts a (time,neurId) tuple into the 2D Neuron State vector
Definition: neuron_monitor_core.cpp:117
GroupConfig_s::type
unsigned int type
Definition: snn_datastructures.h:308
SHOWTIME
@ SHOWTIME
Showtime mode, will only output warnings and errors.
Definition: carlsim_datastructures.h:93
SNN::setLogsFp
void setLogsFp(FILE *fpInf=NULL, FILE *fpErr=NULL, FILE *fpDeb=NULL, FILE *fpLog=NULL)
Sets the file pointers for all log files file pointer NULL means don't change it.
Definition: snn_manager.cpp:1671
RuntimeData_s::gGABAa
float * gGABAa
conductance of gGABAa
Definition: snn_datastructures.h:498
NetworkConfigRT_s::sim_with_nm
bool sim_with_nm
Definition: snn_datastructures.h:672
GroupConfigRT_s::GtoLOffset
int GtoLOffset
published by GroupConfigMD
Definition: snn_datastructures.h:382
NetworkConfigRT_s
runtime network configuration
Definition: snn_datastructures.h:625
SET_CONN_PRESENT
#define SET_CONN_PRESENT(a)
Definition: snn_definitions.h:200
GroupConfigRT_s::baseDP
float baseDP
baseline concentration of Dopamine, published by GroupConfig
Definition: snn_datastructures.h:430
RuntimeData_s::current
float * current
Definition: snn_datastructures.h:476
GroupConfigRT_s::withCompartments
bool withCompartments
Definition: snn_datastructures.h:442
RuntimeData_s::Npre
unsigned short * Npre
stores the number of input connections to a neuron
Definition: snn_datastructures.h:514
GroupConfigRT_s::WithISTDPtype
STDPType WithISTDPtype
published by GroupConfig
Definition: snn_datastructures.h:394
RuntimeData_s::grpAChBuffer
float * grpAChBuffer
Definition: snn_datastructures.h:579
GroupSTDPInfo_s
A struct for retrieving STDP related information of a group.
Definition: carlsim_datastructures.h:411
GlobalNetworkConfig_s::simIntegrationMethod
integrationMethod_t simIntegrationMethod
integration method (forward-Euler or Fourth-order Runge-Kutta)
Definition: snn_datastructures.h:614
SNN::getConnectConfig
ConnectConfig getConnectConfig(short int connectId)
required for homeostasis
Definition: snn_manager.cpp:1716
NetworkConfigRT_s::sim_with_stdp
bool sim_with_stdp
Definition: snn_datastructures.h:664
Grid3D::numX
int numX
Definition: carlsim_datastructures.h:547
SpikeMonitorCore::pushAER
void pushAER(int time, int neurId)
inserts a (time,neurId) tuple into the 2D spike vector
Definition: spike_monitor_core.cpp:303
ConnectionGeneratorCore
used for relaying callback to ConnectionGenerator
Definition: callback_core.h:89
NUM_SYNAPSE_BITS
#define NUM_SYNAPSE_BITS
Definition: snn_definitions.h:187
error_code.h
ConnectionInfo_s::connId
short int connId
Definition: snn_datastructures.h:103
SpikeMonitorCore::isRecording
bool isRecording()
returns recording status
Definition: spike_monitor_core.h:151
GroupConfigRT_s::netId
int netId
published by GroupConfigMD
Definition: snn_datastructures.h:374
GroupConfigRT_s::decayACh
float decayACh
decay rate for Acetylcholine, published by GroupConfig
Definition: snn_datastructures.h:436
PoissonRate
Class for generating Poisson spike trains.
Definition: poisson_rate.h:84
GroupConfigRT_s::isSpikeGenerator
bool isSpikeGenerator
published by GroupConfig
Definition: snn_datastructures.h:387
GroupConfigRT_s::numPreSynapses
int numPreSynapses
the total number of pre-connections of a group, published by GroupConfigMD
Definition: snn_datastructures.h:386
GlobalNetworkConfig_s::numSynNet
int numSynNet
number of total synaptic connections in the global network
Definition: snn_datastructures.h:609
RuntimeData_s::spikeGenBits
unsigned int * spikeGenBits
Definition: snn_datastructures.h:587
ConnectConfig_s::connId
short int connId
connectID of the element in the linked list
Definition: snn_datastructures.h:135
SNN::getConductanceNMDA
std::vector< float > getConductanceNMDA(int grpId)
Definition: snn_manager.cpp:1740
SNN::setCompartmentParameters
void setCompartmentParameters(int grpId, float couplingUp, float couplingDown)
Coupling constants for the compartment are set using this method.
Definition: snn_manager.cpp:392
GroupNeuromodulatorInfo_s::decayACh
float decayACh
decay rate for Acetylcholine
Definition: carlsim_datastructures.h:450
RuntimeData_s::lif_vTh
float * lif_vTh
Definition: snn_datastructures.h:483
NetworkConfigRT_s::numConnections
int numConnections
number of local connections in this local network
Definition: snn_datastructures.h:657
RuntimeData_s::recovery
float * recovery
Definition: snn_datastructures.h:466
INTERVAL_1000MS
@ INTERVAL_1000MS
the update interval will be 1000 ms, which is 1Hz update frequency
Definition: carlsim_datastructures.h:240
GPU_RUNTIME_BASE
#define GPU_RUNTIME_BASE
Definition: snn_definitions.h:143
RuntimeData_s::gAMPA
float * gAMPA
conductance of gAMPA
Definition: snn_datastructures.h:497
GroupConfigRT_s::OMEGA
float OMEGA
published by GroupConfig
Definition: snn_datastructures.h:413
NetworkConfigRT_s::dGABAb
double dGABAb
multiplication factor for decay time of GABAb
Definition: snn_datastructures.h:687
NetworkConfigRT_s::maxDelay
int maxDelay
maximum axonal delay in the global network
Definition: snn_datastructures.h:627
SNN::startTesting
void startTesting(bool shallUpdateWeights=true)
enters a testing phase, where all weight updates are disabled
Definition: snn_manager.cpp:6180
NetworkConfigRT_s::sim_with_conductances
bool sim_with_conductances
Definition: snn_datastructures.h:662
ConnectConfig_s::mulSynSlow
float mulSynSlow
factor to be applied to either gNMDA or gGABAb
Definition: snn_datastructures.h:129
ConnectionInfo_s::grpSrc
int grpSrc
Definition: snn_datastructures.h:95
compConnectConfig_s::grpSrc
int grpSrc
Definition: snn_datastructures.h:180
MAX_TIME_SLICE
#define MAX_TIME_SLICE
Definition: snn_definitions.h:151
RuntimeData_s::lif_gain
float * lif_gain
Definition: snn_datastructures.h:485
RuntimeData_s::Izh_vt
float * Izh_vt
Definition: snn_datastructures.h:470
NUM_CPU_CORES
#define NUM_CPU_CORES
Definition: snn_definitions.h:141
KERNEL_WARN
#define KERNEL_WARN(formatc,...)
Definition: snn_definitions.h:111
NeuronMonitorCore::isRecording
bool isRecording()
returns recording status
Definition: neuron_monitor_core.h:72
USER
@ USER
User mode, for experiment-oriented simulations.
Definition: carlsim_datastructures.h:91
SNN::setNeuromodulator
void setNeuromodulator(int grpId, float baseDP, float tauDP, float base5HT, float tau5HT, float baseACh, float tauACh, float baseNE, float tauNE)
Sets baseline concentration and decay time constant of neuromodulators (DP, 5HT, ACh,...
Definition: snn_manager.cpp:596
GroupConfigRT_s::GAMMA
float GAMMA
published by GroupConfig
Definition: snn_datastructures.h:411
RuntimeData_s::Npost
unsigned short * Npost
stores the number of output connections from a neuron.
Definition: snn_datastructures.h:517
GroupConfigRT_s::numN
int numN
published by GroupConfig
Definition: snn_datastructures.h:384
GroupConfig_s::numN
int numN
Definition: snn_datastructures.h:309
SNN::updateConnectionMonitor
void updateConnectionMonitor(short int connId=ALL)
polls connection weights
Definition: snn_manager.cpp:6218
ThreadStruct_s
CPU multithreading subroutine (that takes single argument) struct argument.
Definition: snn_datastructures.h:722
GlobalNetworkConfig_s::maxDelay
int maxDelay
maximum axonal delay in the global network
Definition: snn_datastructures.h:610
GroupConfigRT_s::isSpikeGenFunc
bool isSpikeGenFunc
published by GroupConfig
Definition: snn_datastructures.h:388
GroupConfigRT_s::hasExternalConnect
bool hasExternalConnect
published by GroupConfigMD
Definition: snn_datastructures.h:399
RuntimeData_s::extCurrent
float * extCurrent
Definition: snn_datastructures.h:478
GroupConfigRT_s::decay5HT
float decay5HT
decay rate for Serotonin, published by GroupConfig
Definition: snn_datastructures.h:435
GroupMonitorCore
GroupMonitor private core implementation.
Definition: group_monitor_core.h:63
SNN::setSpikeMonitor
SpikeMonitor * setSpikeMonitor(int gid, FILE *fid)
sets up a spike monitor registered with a callback to process the spikes, there can only be one Spike...
Definition: snn_manager.cpp:1181
CONN_USER_DEFINED
@ CONN_USER_DEFINED
Definition: snn_datastructures.h:74
RuntimeData_s::nUBuffer
float * nUBuffer
Definition: snn_datastructures.h:584
CONN_RANDOM
@ CONN_RANDOM
Definition: snn_datastructures.h:74
ConnectionInfo_s::nSrc
int nSrc
Definition: snn_datastructures.h:97
RuntimeData_s::nPoissonSpikes
unsigned int nPoissonSpikes
the total number of spikes of poisson neurons, used in CPU_MODE currently
Definition: snn_datastructures.h:459
RuntimeData_s::wt
float * wt
stores the weight change of a synaptic connection
Definition: snn_datastructures.h:523
RuntimeData_s::nVBuffer
float * nVBuffer
Definition: snn_datastructures.h:583
MAX_NEURON_MON_GRP_SZIE
#define MAX_NEURON_MON_GRP_SZIE
Definition: snn_definitions.h:164
GroupConfigRT_s::isLIF
bool isLIF
True = a LIF spiking group.
Definition: snn_datastructures.h:440
GroupConfigRT_s::Type
unsigned int Type
published by GroupConfig
Definition: snn_datastructures.h:383
NetworkConfigRT_s::sNMDA
double sNMDA
scaling factor for NMDA amplitude
Definition: snn_datastructures.h:684
CPU_RUNTIME_BASE
#define CPU_RUNTIME_BASE
Definition: snn_definitions.h:138
FORWARD_EULER
@ FORWARD_EULER
Definition: carlsim_datastructures.h:133
Grid3D::distY
float distY
Definition: carlsim_datastructures.h:548
group_monitor.h
GroupConfigRT_s::gEndN
int gEndN
published by GroupConfigMD
Definition: snn_datastructures.h:377
SNN::updateNeuronMonitor
void updateNeuronMonitor(int grpId=ALL)
copy required neuron state values from ??? buffer to ??? buffer
Definition: snn_manager.cpp:6589
RuntimeData_s::curSpike
bool * curSpike
Definition: snn_datastructures.h:491
GroupNeuromodulatorInfo_s::base5HT
float base5HT
baseline concentration of Serotonin
Definition: carlsim_datastructures.h:445
GroupConfigRT_s::TAU_MINUS_INV_INB
float TAU_MINUS_INV_INB
published by GroupConfig
Definition: snn_datastructures.h:415
GroupConfigMD_s::gGrpId
int gGrpId
Definition: snn_datastructures.h:336
ConnectionInfo_s::preSynId
int preSynId
Definition: snn_datastructures.h:102
GroupConfigRT_s::gGrpId
int gGrpId
published by GroupConfigMD
Definition: snn_datastructures.h:375
SNN::MINOR_VERSION
static const unsigned int MINOR_VERSION
minor release version, as in CARLsim 2.X
Definition: snn.h:138
HYBRID_MODE
@ HYBRID_MODE
model is run on CPU Core(s), GPU card(s) or both
Definition: carlsim_datastructures.h:116
GroupConfigRT_s::compNeighbors
int compNeighbors[4]
Definition: snn_datastructures.h:445
RuntimeData_s::postDelayInfo
DelayInfo * postDelayInfo
delay information
Definition: snn_datastructures.h:541
RuntimeData_s::timeTableD1
unsigned int * timeTableD1
firing table, only used in CPU_MODE currently
Definition: snn_datastructures.h:542
compConnectConfig_s
The configuration of a compartmental connection.
Definition: snn_datastructures.h:179
ConnectConfig_s::maxDelay
uint8_t maxDelay
Definition: snn_datastructures.h:122
ConnectConfig_s::grpDest
int grpDest
Definition: snn_datastructures.h:121
SNN::createGroup
int createGroup(const std::string &grpName, const Grid3D &grid, int neurType, int preferredPartition, ComputingBackend preferredBackend)
Creates a group of Izhikevich spiking neurons.
Definition: snn_manager.cpp:251
GROUP_ID_MASK
#define GROUP_ID_MASK
Definition: snn_definitions.h:184
GlobalNetworkConfig_s::numNPois
int numNPois
number of poisson neurons in the global network
Definition: snn_datastructures.h:608
SNN::getGroupNeuromodulatorInfo
GroupNeuromodulatorInfo getGroupNeuromodulatorInfo(int grpId)
Definition: snn_manager.cpp:1900
RangeDelay
a range struct for synaptic delays
Definition: carlsim_datastructures.h:278
ConnectionInfo_s::initWt
float initWt
Definition: snn_datastructures.h:100
GroupNeuromodulatorInfo_s::baseACh
float baseACh
baseline concentration of Acetylcholine
Definition: carlsim_datastructures.h:446
PoissonRate::getNumNeurons
int getNumNeurons()
Returns the number of neurons for which to generate Poisson spike trains.
Definition: poisson_rate.cpp:222
SNN::~SNN
~SNN()
SNN Destructor.
Definition: snn_manager.cpp:85
RuntimeData_s::voltage
float * voltage
membrane potential for each regular neuron
Definition: snn_datastructures.h:464
GroupConfig_s::preferredNetId
int preferredNetId
Definition: snn_datastructures.h:307
neuron_monitor_core.h
SNN::setupNetwork
void setupNetwork()
build the network
Definition: snn_manager.cpp:772
GroupSTDPInfo_s::DELTA
float DELTA
the range of inhibitory LTD if the pulse I-STDP curve is used
Definition: carlsim_datastructures.h:431
SNN::stopTesting
void stopTesting()
exits a testing phase, making weight updates possible again
Definition: snn_manager.cpp:6207
GroupConfigRT_s::avgTimeScaleInv
float avgTimeScaleInv
published by GroupConfig
Definition: snn_datastructures.h:426
ThreadStruct_s::netId
int netId
Definition: snn_datastructures.h:724
RuntimeData_s::Npre_plastic
unsigned short * Npre_plastic
stores the number of plastic input connections to a neuron
Definition: snn_datastructures.h:515
RuntimeData_s::spikeCountD1Sec
unsigned int spikeCountD1Sec
the total number of spikes with axonal delay == 1 in 1 second, used in CPU_MODE currently
Definition: snn_datastructures.h:452
PARTITIONED_SNN
@ PARTITIONED_SNN
Definition: snn_datastructures.h:80
GET_CONN_GRP_ID
#define GET_CONN_GRP_ID(val)
Definition: snn_definitions.h:191
SNN::MAJOR_VERSION
static const unsigned int MAJOR_VERSION
major release version, as in CARLsim X
Definition: snn.h:137
GroupConfigRT_s::lGrpId
int lGrpId
published by GroupConfigMD
Definition: snn_datastructures.h:378
INTERVAL_10MS
@ INTERVAL_10MS
the update interval will be 10 ms, which is 100Hz update frequency
Definition: carlsim_datastructures.h:238
GroupConfigRT_s::compCouplingUp
float compCouplingUp
Definition: snn_datastructures.h:443
NEURON_MAX_FIRING_RATE
#define NEURON_MAX_FIRING_RATE
Definition: snn_definitions.h:147
GroupMonitor
Class GroupMonitor.
Definition: group_monitor.h:103
NetworkConfigRT_s::sim_with_stp
bool sim_with_stp
Definition: snn_datastructures.h:667
NetworkConfigRT_s::maxSpikesD1
unsigned int maxSpikesD1
the estimated maximum number of spikes with delay == 1 in a network
Definition: snn_datastructures.h:652
GlobalNetworkConfig_s::numNExcPois
int numNExcPois
number of excitatory poisson neurons in the global network
Definition: snn_datastructures.h:606
RuntimeData_s::Izh_C
float * Izh_C
Definition: snn_datastructures.h:467
LARGE_SPIKE_MON_GRP_SIZE
#define LARGE_SPIKE_MON_GRP_SIZE
Definition: snn_definitions.h:160
GroupConfigRT_s::base5HT
float base5HT
baseline concentration of Serotonin, published by GroupConfig
Definition: snn_datastructures.h:431
GroupNeuromodulatorInfo_s::baseNE
float baseNE
baseline concentration of Noradrenaline
Definition: carlsim_datastructures.h:447
GroupConfigRT_s::compCoupling
float compCoupling[4]
Definition: snn_datastructures.h:446
SNN::setNeuronMonitor
NeuronMonitor * setNeuronMonitor(int gid, FILE *fid)
sets up a neuron monitor registered with a callback to process the neuron state values,...
Definition: snn_manager.cpp:1221
GlobalNetworkConfig_s::numN1msDelay
int numN1msDelay
number of neurons with maximum out going axonal delay = 1 ms
Definition: snn_datastructures.h:611
GroupConfigRT_s::baseNE
float baseNE
baseline concentration of Noradrenaline, published by GroupConfig
Definition: snn_datastructures.h:433
RuntimeData_s::Izh_c
float * Izh_c
Definition: snn_datastructures.h:474
SNN::isPoissonGroup
bool isPoissonGroup(int gGrpId)
Definition: snn.h:622
RuntimeData_s::gNMDA_r
float * gNMDA_r
Definition: snn_datastructures.h:495
GroupMonitorCore::getGroupFileId
FILE * getGroupFileId()
returns a pointer to the group data file
Definition: group_monitor_core.h:137
NeuronMonitorCore::setNeuronFileId
void setNeuronFileId(FILE *neuronFileId)
sets pointer to Neuron file
Definition: neuron_monitor_core.cpp:177
ConnectionInfo_s::nDest
int nDest
Definition: snn_datastructures.h:98
GroupMonitorCore::isRecording
bool isRecording()
returns recording status
Definition: group_monitor_core.h:99
MAX_SIMULATION_TIME
#define MAX_SIMULATION_TIME
Definition: snn_definitions.h:152
ALL
#define ALL
CARLsim common definitions.
Definition: carlsim_definitions.h:55
RuntimeData_s::nIBuffer
float * nIBuffer
Definition: snn_datastructures.h:585
connection_monitor.h
SNN::getWeightMatrix2D
std::vector< std::vector< float > > getWeightMatrix2D(short int connId)
Definition: snn_manager.cpp:6232
GroupConfigRT_s::WithHomeostasis
bool WithHomeostasis
published by GroupConfig
Definition: snn_datastructures.h:397
RuntimeData_s::lastSpikeTime
int * lastSpikeTime
stores the last spike time of a neuron
Definition: snn_datastructures.h:519
compConnectConfig_s::connId
short int connId
Definition: snn_datastructures.h:181
NetworkConfigRT_s::simIntegrationMethod
integrationMethod_t simIntegrationMethod
integration method (forward-Euler or Fourth-order Runge-Kutta)
Definition: snn_datastructures.h:690
SNN::getGroupId
int getGroupId(std::string grpName)
Definition: snn_manager.cpp:1852
ComputingBackend
ComputingBackend
computing backend
Definition: carlsim_datastructures.h:147
SpikeBuffer::back
SpikeIterator back()
pointer to the back of the spike buffer
Definition: spike_buffer.cpp:224
NeuronMonitorCore
Definition: neuron_monitor_core.h:58
KERNEL_ERROR
#define KERNEL_ERROR(formatc,...)
Definition: snn_definitions.h:109
ConnectConfig_s::minDelay
uint8_t minDelay
Definition: snn_datastructures.h:123
NetworkConfigRT_s::numPreSynNet
int numPreSynNet
the total number of pre-connections in a network
Definition: snn_datastructures.h:648
GroupConfigRT_s::baseACh
float baseACh
baseline concentration of Acetylcholine, published by GroupConfig
Definition: snn_datastructures.h:432
CONN_GAUSSIAN
@ CONN_GAUSSIAN
Definition: snn_datastructures.h:74
RuntimeData_s::spikeCountExtRxD1
unsigned int spikeCountExtRxD1
the number of external spikes with axonal delay == 1 in a simulation, used in CPU_MODE currently
Definition: snn_datastructures.h:462
spike_buffer.h
SNN::getSpikeMonitor
SpikeMonitor * getSpikeMonitor(int grpId)
Returns pointer to existing SpikeMonitor object, NULL else.
Definition: snn_manager.cpp:1959
GroupConfigRT_s::avgTimeScale_decay
float avgTimeScale_decay
published by GroupConfig
Definition: snn_datastructures.h:425
NetworkConfigRT_s::numPostSynNet
int numPostSynNet
the total number of post-connections in a network
Definition: snn_datastructures.h:647
KERNEL_INFO
#define KERNEL_INFO(formatc,...)
Definition: snn_definitions.h:113
SNN::createSpikeGeneratorGroup
int createSpikeGeneratorGroup(const std::string &grpName, const Grid3D &grid, int neurType, int preferredPartition, ComputingBackend preferredBackend)
Creates a spike generator group (dummy-neurons, not Izhikevich spiking neurons)
Definition: snn_manager.cpp:348
RuntimeData_s::grpDABuffer
float * grpDABuffer
Definition: snn_datastructures.h:577
Grid3D::numY
int numY
Definition: carlsim_datastructures.h:547
DelayInfo_s::delay_index_start
short delay_index_start
Definition: snn_datastructures.h:85
ConnectConfig_s::connProp
uint32_t connProp
Definition: snn_datastructures.h:131
GroupConfigRT_s::WithSTDP
bool WithSTDP
published by GroupConfig
Definition: snn_datastructures.h:390
RuntimeData_s::postSynapticIds
SynInfo * postSynapticIds
10 bit syn id, 22 bit neuron id, ordered based on delay
Definition: snn_datastructures.h:538
SNN::isSimulationWithNMDARise
bool isSimulationWithNMDARise()
Definition: snn.h:635
GroupConfigRT_s::Noffset
int Noffset
the offset of spike generator (poisson) neurons [0, numNPois), published by GroupConfigMD
Definition: snn_datastructures.h:400
RuntimeData_s::spikeCountSec
unsigned int spikeCountSec
the total number of spikes in 1 second, used in CPU_MODE currently
Definition: snn_datastructures.h:451
SNN::getConductanceAMPA
std::vector< float > getConductanceAMPA(int grpId)
Definition: snn_manager.cpp:1727
CPU_MODE
@ CPU_MODE
model is run on CPU core(s)
Definition: carlsim_datastructures.h:114
CPU_MEM
@ CPU_MEM
runtime data is allocated on CPU (main) memory
Definition: snn_datastructures.h:69
SNN::SNN
SNN(const std::string &name, SimMode preferredSimMode, LoggerMode loggerMode, int randSeed)
SNN Constructor.
Definition: snn_manager.cpp:76
ThreadStruct_s::snn_pointer
void * snn_pointer
Definition: snn_datastructures.h:723
SNN::setWeight
void setWeight(short int connId, int neurIdPre, int neurIdPost, float weight, bool updateWeightRange=false)
sets the weight value of a specific synapse
Definition: snn_manager.cpp:1286
MAX_NUM_COMP_CONN
#define MAX_NUM_COMP_CONN
Definition: carlsim_definitions.h:90
KERNEL_DEBUG
#define KERNEL_DEBUG(formatc,...)
Definition: snn_definitions.h:115
RuntimeData_s::connIdsPreIdx
short int * connIdsPreIdx
connectId, per synapse, presynaptic cumulative indexing
Definition: snn_datastructures.h:529
RuntimeData_s::firingTableD1
int * firingTableD1
Definition: snn_datastructures.h:545
ThreadStruct_s::startIdx
int startIdx
Definition: snn_datastructures.h:726
RuntimeData_s::spikeCountD2
unsigned int spikeCountD2
the total number of spikes with axonal delay > 1 in a simulation, used in CPU_MODE currently
Definition: snn_datastructures.h:458
RuntimeData_s::cumulativePost
unsigned int * cumulativePost
Definition: snn_datastructures.h:526
UNKNOWN_STDP
@ UNKNOWN_STDP
Definition: carlsim_datastructures.h:163
SNN::getNeuronLocation3D
Point3D getNeuronLocation3D(int neurId)
Definition: snn_manager.cpp:1915
GroupNeuromodulatorInfo_s::decayNE
float decayNE
decay rate for Noradrenaline
Definition: carlsim_datastructures.h:451
SNN::setIntegrationMethod
void setIntegrationMethod(integrationMethod_t method, int numStepsPerMs)
Sets the integration method and the number of integration steps per 1ms simulation time step.
Definition: snn_manager.cpp:496
SYNAPSE_ID_MASK
#define SYNAPSE_ID_MASK
Definition: snn_definitions.h:185
SNN::scaleWeights
void scaleWeights(short int connId, float scale, bool updateWeightRange=false)
Definition: snn_manager.cpp:1005
RuntimeData_s::spikeCountD1
unsigned int spikeCountD1
the total number of spikes with axonal delay == 1 in a simulation, used in CPU_MODE currently
Definition: snn_datastructures.h:457
RuntimeData_s::maxSynWt
float * maxSynWt
maximum synaptic weight for a connection
Definition: snn_datastructures.h:524
GroupConfigRT_s::numCompNeighbors
short numCompNeighbors
Definition: snn_datastructures.h:447
GroupConfigRT_s::lStartN
int lStartN
published by GroupConfigMD
Definition: snn_datastructures.h:379
neuron_monitor.h
STDPCurve
STDPCurve
STDP curves.
Definition: carlsim_datastructures.h:177
ThreadStruct_s::GtoLOffset
int GtoLOffset
Definition: snn_datastructures.h:728
SpikeMonitorCore::setLastUpdated
void setLastUpdated(long int lastUpdate)
sets timestamp of last SpikeMonitor update
Definition: spike_monitor_core.h:187
RuntimeData_s::Izh_k
float * Izh_k
Definition: snn_datastructures.h:468
Point3D
a point in 3D space
Definition: linear_algebra.h:57
RuntimeData_s::extFiringTableEndIdxD1
int * extFiringTableEndIdxD1
Definition: snn_datastructures.h:551
ConnectConfig_s::connectionMonitorId
int connectionMonitorId
Definition: snn_datastructures.h:130
compareSrcNeuron
bool compareSrcNeuron(const ConnectionInfo &first, const ConnectionInfo &second)
Definition: snn_manager.cpp:3215
SpikeMonitorCore::getSpikeFileId
FILE * getSpikeFileId()
returns a pointer to the spike file
Definition: spike_monitor_core.h:178
SNN::getSimTimeMs
int getSimTimeMs()
Definition: snn.h:582
NO_LOGGER_DIR_ERROR
#define NO_LOGGER_DIR_ERROR
Definition: error_code.h:94
GlobalNetworkConfig_s::numNInhPois
int numNInhPois
number of inhibitory poisson neurons in the global network
Definition: snn_datastructures.h:607
RuntimeData_s::synSpikeTime
int * synSpikeTime
stores the last spike time of a synapse
Definition: snn_datastructures.h:520
SpikeMonitorCore::isBufferBig
bool isBufferBig()
returns true if spike buffer is close to maxAllowedBufferSize
Definition: spike_monitor_core.cpp:467
GroupConfigRT_s::MaxDelay
int8_t MaxDelay
published by GroupConfigMD
Definition: snn_datastructures.h:401
SYN_PLASTIC
#define SYN_PLASTIC
Definition: carlsim_definitions.h:60
Grid3D
A struct to arrange neurons on a 3D grid (a primitive cubic Bravais lattice with cubic side length 1)
Definition: carlsim_datastructures.h:489
CUSTOM
@ CUSTOM
Custom mode, the user can set the location of all the file pointers.
Definition: carlsim_datastructures.h:95
SNN::connectCompartments
short int connectCompartments(int grpIdLower, int grpIdUpper)
Definition: snn_manager.cpp:216
RuntimeData_s::preSynapticIds
SynInfo * preSynapticIds
Definition: snn_datastructures.h:539
SNN::setHomeostasis
void setHomeostasis(int grpId, bool isSet, float homeoScale, float avgTimeScale)
Sets the homeostasis parameters. g is the grpID, enable=true(false) enables(disables) homeostasis,...
Definition: snn_manager.cpp:460
SNN::setSpikeRate
void setSpikeRate(int grpId, PoissonRate *spikeRate, int refPeriod)
Sets the Poisson spike rate for a group. For information on how to set up spikeRate,...
Definition: snn_manager.cpp:1270
Point3D::y
double y
Definition: linear_algebra.h:85
NetworkConfigRT_s::numNAssigned
int numNAssigned
number of total neurons assigned to the local network
Definition: snn_datastructures.h:641
SNN::setESTDP
void setESTDP(int grpId, bool isSet, STDPType type, STDPCurve curve, float alphaPlus, float tauPlus, float alphaMinus, float tauMinus, float gamma)
Set the spike-timing-dependent plasticity (STDP) for a neuron group.
Definition: snn_manager.cpp:620
GlobalNetworkConfig_s::numNExcReg
int numNExcReg
number of regular excitatory neurons in the global network
Definition: snn_datastructures.h:602
GroupConfigRT_s::gStartN
int gStartN
published by GroupConfigMD
Definition: snn_datastructures.h:376
GET_FIXED_PLASTIC
#define GET_FIXED_PLASTIC(a)
Definition: snn_definitions.h:207
NetworkConfigRT_s::sim_with_homeostasis
bool sim_with_homeostasis
Definition: snn_datastructures.h:666
RuntimeData_s::gGABAb
float * gGABAb
conductance of gGABAb
Definition: snn_datastructures.h:499
SpikeMonitorCore::getMode
SpikeMonMode getMode()
returns recording mode
Definition: spike_monitor_core.h:100
loggerMode_string
static const char * loggerMode_string[]
Definition: carlsim_datastructures.h:98
SILENT
@ SILENT
Silent mode, no output is generated.
Definition: carlsim_datastructures.h:94
EXECUTABLE_SNN
@ EXECUTABLE_SNN
Definition: snn_datastructures.h:81
ConnectConfig_s
The configuration of a connection.
Definition: snn_datastructures.h:119
RuntimeData_s::Izh_a
float * Izh_a
Definition: snn_datastructures.h:472
NetworkConfigRT_s::sim_in_testing
bool sim_in_testing
Definition: snn_datastructures.h:668
RuntimeData_s::lif_tau_m
int * lif_tau_m
parameters for a LIF spiking group
Definition: snn_datastructures.h:480
LONG_SPIKE_MON_DURATION
#define LONG_SPIKE_MON_DURATION
Definition: snn_definitions.h:159
GroupConfigRT_s::withParamModel_9
bool withParamModel_9
False = 4 parameter model; 1 = 9 parameter model.
Definition: snn_datastructures.h:439
ConnectionMonitorCore::setConnectFileId
void setConnectFileId(FILE *connFileId)
sets pointer to connection file
Definition: connection_monitor_core.cpp:414
ID_OVERFLOW_ERROR
#define ID_OVERFLOW_ERROR
Definition: error_code.h:96
LoggerMode
LoggerMode
Logger modes.
Definition: carlsim_datastructures.h:90
SpikeBuffer
Circular buffer for delivering spikes.
Definition: spike_buffer.h:65
GroupConfigRT_s::STP_tau_x_inv
float STP_tau_x_inv
published by GroupConfig
Definition: snn_datastructures.h:406
SNN::getNumSynapticConnections
int getNumSynapticConnections(short int connectionId)
gets number of connections associated with a connection ID
Definition: snn_manager.cpp:1948
RuntimeData_s::avgFiring
float * avgFiring
Definition: snn_datastructures.h:566
Grid3D::distZ
float distZ
Definition: carlsim_datastructures.h:548
GroupConfigRT_s::ALPHA_MINUS_INB
float ALPHA_MINUS_INB
published by GroupConfig
Definition: snn_datastructures.h:417
NetworkConfigRT_s::numGroups
int numGroups
number of local groups in this local network
Definition: snn_datastructures.h:655
GroupConfig_s::isSpikeGenerator
bool isSpikeGenerator
Definition: snn_datastructures.h:310
RadiusRF::radY
double radY
Definition: carlsim_datastructures.h:372
RuntimeData_s::grpDA
float * grpDA
Definition: snn_datastructures.h:571
ConnectConfig_s::connProbability
float connProbability
connection probability
Definition: snn_datastructures.h:134
RuntimeData_s::extFiringTableD1
int ** extFiringTableD1
external firing table, only used on GPU
Definition: snn_datastructures.h:548
GlobalNetworkConfig_s::numN
int numN
number of neurons in the global network
Definition: snn_datastructures.h:601
SNN::isSimulationWithCOBA
bool isSimulationWithCOBA()
Definition: snn.h:633
CONFIG_SNN
@ CONFIG_SNN
Definition: snn_datastructures.h:78
NeuronMonitorCore::setLastUpdated
void setLastUpdated(long int lastUpdate)
sets timestamp of last NeuronMonitor update
Definition: neuron_monitor_core.h:96
SNN::getSimTime
int getSimTime()
Definition: snn.h:580
SNN::setNeuronParameters
void setNeuronParameters(int grpId, float izh_a, float izh_a_sd, float izh_b, float izh_b_sd, float izh_c, float izh_c_sd, float izh_d, float izh_d_sd)
Sets the Izhikevich parameters a, b, c, and d of a neuron group.
Definition: snn_manager.cpp:504
NeuronMonitor
Definition: neuron_monitor.h:58
SNN::createGroupLIF
int createGroupLIF(const std::string &grpName, const Grid3D &grid, int neurType, int preferredPartition, ComputingBackend preferredBackend)
Creates a group of LIF spiking neurons.
Definition: snn_manager.cpp:301
SynInfo_s::nId
int nId
neuron id
Definition: snn_datastructures.h:91
GroupConfigRT_s::WithESTDPcurve
STDPCurve WithESTDPcurve
published by GroupConfig
Definition: snn_datastructures.h:395
GroupSTDPInfo_s::WithESTDP
bool WithESTDP
enable E-STDP flag
Definition: carlsim_datastructures.h:413
GroupConfigRT_s::lEndN
int lEndN
published by GroupConfigMD
Definition: snn_datastructures.h:380
SYN_FIXED
#define SYN_FIXED
Definition: carlsim_definitions.h:59
SNN::setHomeoBaseFiringRate
void setHomeoBaseFiringRate(int groupId, float baseFiring, float baseFiringSD)
Sets homeostatic target firing rate (enforced through homeostatic synaptic scaling)
Definition: snn_manager.cpp:480
NetworkConfigRT_s::numNReg
int numNReg
number of regular (spiking) neurons
Definition: snn_datastructures.h:633
SNN::getGroupName
std::string getGroupName(int grpId)
Definition: snn_manager.cpp:1864
GroupSTDPInfo_s::TAU_PLUS_INV_INB
float TAU_PLUS_INV_INB
the inverse of tau plus, if the exponential I-STDP curve is used
Definition: carlsim_datastructures.h:423
GroupMonitorCore::setGroupFileId
void setGroupFileId(FILE *groupFileId)
sets pointer to group data file
Definition: group_monitor_core.cpp:222
ConnectionMonitor
Class ConnectionMonitor.
Definition: connection_monitor.h:148
SNN::isGroupWithHomeostasis
bool isGroupWithHomeostasis(int grpId)
returns whether group has homeostasis enabled (true) or not (false)
Definition: snn_manager.cpp:4893
ConnectConfig_s::type
conType_t type
Definition: snn_datastructures.h:133
RuntimeData_s::extFiringTableEndIdxD2
int * extFiringTableEndIdxD2
Definition: snn_datastructures.h:552
RuntimeData_s::memType
MemType memType
Definition: snn_datastructures.h:505
GroupSTDPInfo_s::WithESTDPcurve
STDPCurve WithESTDPcurve
the E-STDP curve
Definition: carlsim_datastructures.h:417
Grid3D::N
int N
Definition: carlsim_datastructures.h:550
GlobalNetworkConfig_s::numNInhReg
int numNInhReg
number of regular inhibitory neurons in the global network
Definition: snn_datastructures.h:603
RuntimeData_s::grpIds
short int * grpIds
Definition: snn_datastructures.h:530
SNN::exitSimulation
void exitSimulation(int val=1)
deallocates all dynamical structures and exits
Definition: snn_manager.cpp:994
GroupConfigRT_s::KAPPA
float KAPPA
published by GroupConfig
Definition: snn_datastructures.h:412
ConnectConfig_s::maxWt
float maxWt
Definition: snn_datastructures.h:124
SpikeBuffer::SpikeNode::grpId
int grpId
corresponding global group Id
Definition: spike_buffer.h:91
snn.h
GroupConfigRT_s::STP_A
float STP_A
published by GroupConfig
Definition: snn_datastructures.h:403
GroupConfigRT_s::ALPHA_PLUS_EXC
float ALPHA_PLUS_EXC
published by GroupConfig
Definition: snn_datastructures.h:409
TARGET_NMDA
#define TARGET_NMDA
Definition: carlsim_definitions.h:67
SpikeMonitorCore::setSpikeFileId
void setSpikeFileId(FILE *spikeFileId)
sets pointer to spike file
Definition: spike_monitor_core.cpp:359
NetworkConfigRT_s::sim_with_fixedwts
bool sim_with_fixedwts
Definition: snn_datastructures.h:661
GlobalNetworkConfig_s::numNReg
int numNReg
number of regular (spiking) neurons in the global network
Definition: snn_datastructures.h:604
GroupNeuromodulatorInfo_s::decay5HT
float decay5HT
decay rate for Serotonin
Definition: carlsim_datastructures.h:449
RuntimeData_s::stpu
float * stpu
Definition: snn_datastructures.h:512
ConnectionMonitorCore::writeConnectFileSnapshot
void writeConnectFileSnapshot(int simTimeMs, std::vector< std::vector< float > > wts)
writes each snapshot to connect file
Definition: connection_monitor_core.cpp:517
GroupConfigRT_s::LtoGOffset
int LtoGOffset
published by GroupConfigMD
Definition: snn_datastructures.h:381
SNN
Contains all of CARLsim's core functionality.
Definition: snn.h:114
ConnectionInfo_s::srcGLoffset
int srcGLoffset
Definition: snn_datastructures.h:99
SNN::getConductanceGABAb
std::vector< float > getConductanceGABAb(int grpId)
Definition: snn_manager.cpp:1773
SNN::getWeightRange
RangeWeight getWeightRange(short int connId)
returns RangeWeight struct of a connection
Definition: snn_manager.cpp:2002
SynInfo_s::gsId
int gsId
group id and synapse id
Definition: snn_datastructures.h:90
SpikeBuffer::front
SpikeIterator front(int stepOffset=0)
pointer to the front of the spike buffer
Definition: spike_buffer.cpp:223
RuntimeData_s::lif_bias
float * lif_bias
Definition: snn_datastructures.h:486
RuntimeData_s::grpNE
float * grpNE
Definition: snn_datastructures.h:574
ConnectConfig_s::numberOfConnections
int numberOfConnections
Definition: snn_datastructures.h:136
GroupConfig_s
The configuration of a group.
Definition: snn_datastructures.h:301
RuntimeData_s::nSpikeCnt
int * nSpikeCnt
homeostatic plasticity variables
Definition: snn_datastructures.h:561
GroupNeuromodulatorInfo_s
A struct for retrieving neuromodulator information of a group.
Definition: carlsim_datastructures.h:443
RuntimeData_s::extFiringTableD2
int ** extFiringTableD2
external firing table, only used on GPU
Definition: snn_datastructures.h:549
NetworkConfigRT_s::numGroupsAssigned
int numGroupsAssigned
number of groups assigned to this local network
Definition: snn_datastructures.h:656
TARGET_AMPA
#define TARGET_AMPA
Definition: carlsim_definitions.h:66
CONN_FULL_NO_DIRECT
@ CONN_FULL_NO_DIRECT
Definition: snn_datastructures.h:74
ConnectionInfo_s::maxWt
float maxWt
Definition: snn_datastructures.h:101
DelayInfo_s::delay_length
short delay_length
Definition: snn_datastructures.h:86
GroupSTDPInfo_s::WithESTDPtype
STDPType WithESTDPtype
the type of E-STDP (STANDARD or DA_MOD)
Definition: carlsim_datastructures.h:415
RuntimeData_s::gNMDA_d
float * gNMDA_d
Definition: snn_datastructures.h:496
IS_INHIBITORY_TYPE
#define IS_INHIBITORY_TYPE(type)
Definition: carlsim_definitions.h:80
UpdateInterval
UpdateInterval
Update frequency for weights.
Definition: carlsim_datastructures.h:237
SNN::isExcitatoryGroup
bool isExcitatoryGroup(int gGrpId)
Definition: snn.h:620
GroupConfigRT_s::TAU_PLUS_INV_INB
float TAU_PLUS_INV_INB
published by GroupConfig
Definition: snn_datastructures.h:414
GroupMonitorCore::getLastUpdated
int getLastUpdated()
returns timestamp of last GroupMonitor update
Definition: group_monitor_core.h:143
GroupConfigRT_s::FixedInputWts
bool FixedInputWts
published by GroupConfigMD
Definition: snn_datastructures.h:398
NeuronMonitorCore::getNeuronFileId
FILE * getNeuronFileId()
returns a pointer to the neuron state file
Definition: neuron_monitor_core.h:90
SNN::getDelays
uint8_t * getDelays(int gGrpIdPre, int gGrpIdPost, int &numPreN, int &numPostN)
Returns the delay information for all synaptic connections between a pre-synaptic and a post-synaptic...
Definition: snn_manager.cpp:1801
MAX_SPIKE_MON_BUFFER_SIZE
#define MAX_SPIKE_MON_BUFFER_SIZE
Definition: snn_definitions.h:158
GroupConfigRT_s::decayNE
float decayNE
decay rate for Noradrenaline, published by GroupConfig
Definition: snn_datastructures.h:437
GroupConfig_s::isLIF
bool isLIF
Definition: snn_datastructures.h:312
RuntimeData_s::grp5HTBuffer
float * grp5HTBuffer
Definition: snn_datastructures.h:578
NetworkConfigRT_s::sim_with_NMDA_rise
bool sim_with_NMDA_rise
a flag to inform whether to compute NMDA rise time
Definition: snn_datastructures.h:679
ConnectionMonitorCore::init
void init()
Definition: connection_monitor_core.cpp:84
SNN::setISTDP
void setISTDP(int grpId, bool isSet, STDPType type, STDPCurve curve, float ab1, float ab2, float tau1, float tau2)
Set the inhibitory spike-timing-dependent plasticity (STDP) with anti-hebbian curve for a neuron grou...
Definition: snn_manager.cpp:653
SNN::setConnectionMonitor
ConnectionMonitor * setConnectionMonitor(int grpIdPre, int grpIdPost, FILE *fid)
sets up a network monitor registered with a callback to process the spikes.
Definition: snn_manager.cpp:1124
SpikeMonitorCore::getLastUpdated
long int getLastUpdated()
returns timestamp of last SpikeMonitor update
Definition: spike_monitor_core.h:184
GroupSTDPInfo_s::BETA_LTP
float BETA_LTP
the amplitude of inhibitory LTP if the pulse I-STDP curve is used
Definition: carlsim_datastructures.h:428
GroupConfig_s::withCompartments
bool withCompartments
Definition: snn_datastructures.h:313
RoutingTableEntry_s
runtime spike routing table entry
Definition: snn_datastructures.h:701
RuntimeData_s::grp5HT
float * grp5HT
Definition: snn_datastructures.h:572
GroupSTDPInfo_s::ALPHA_MINUS_INB
float ALPHA_MINUS_INB
the amplitude of alpha minus, if the exponential I-STDP curve is used
Definition: carlsim_datastructures.h:426
GlobalNetworkConfig_s::simNumStepsPerMs
int simNumStepsPerMs
number of steps per 1 millisecond
Definition: snn_datastructures.h:615
GroupSTDPInfo_s::LAMBDA
float LAMBDA
the range of inhibitory LTP if the pulse I-STDP curve is used
Definition: carlsim_datastructures.h:430
SNN::getConnectId
short int getConnectId(int grpIdPre, int grpIdPost)
find connection ID based on pre-post group pair, O(N)
Definition: snn_manager.cpp:1703
RuntimeData_s::firingTableD2
int * firingTableD2
Definition: snn_datastructures.h:546
RuntimeData_s::gGABAb_r
float * gGABAb_r
Definition: snn_datastructures.h:500
ConnectionInfo_s
Definition: snn_datastructures.h:94
GroupConfigMD_s
Definition: snn_datastructures.h:328
RuntimeData_s::spikeCountLastSecLeftD2
unsigned int spikeCountLastSecLeftD2
the number of spikes left over from the last second, used in CPU_MODE currently
Definition: snn_datastructures.h:460
GroupConfigRT_s::ALPHA_MINUS_EXC
float ALPHA_MINUS_EXC
published by GroupConfig
Definition: snn_datastructures.h:410
GroupConfigRT_s::STP_tau_u_inv
float STP_tau_u_inv
published by GroupConfig
Definition: snn_datastructures.h:405
SpikeBuffer::reset
void reset(int minDelay, int maxDelay)
Reset buffer data.
Definition: spike_buffer.cpp:221
Grid3D::numZ
int numZ
Definition: carlsim_datastructures.h:547
ThreadStruct_s::lGrpId
int lGrpId
Definition: snn_datastructures.h:725
GroupConfigRT_s::compCouplingDown
float compCouplingDown
Definition: snn_datastructures.h:444
NetworkConfigRT_s::simNumStepsPerMs
int simNumStepsPerMs
number of steps per 1 millisecond
Definition: snn_datastructures.h:691
GroupConfigRT_s::TAU_MINUS_INV_EXC
float TAU_MINUS_INV_EXC
published by GroupConfig
Definition: snn_datastructures.h:408
ConnectionMonitorCore::getUpdateTimeIntervalSec
int getUpdateTimeIntervalSec()
Definition: connection_monitor_core.h:130
MAX_NET_PER_SNN
#define MAX_NET_PER_SNN
Definition: snn_definitions.h:133
spike_monitor_core.h
SNN::getGroupGrid3D
Grid3D getGroupGrid3D(int grpId)
Definition: snn_manager.cpp:1845
NetworkConfigRT_s::numN
int numN
number of neurons in the local network
Definition: snn_datastructures.h:630
GET_CONN_NEURON_ID
#define GET_CONN_NEURON_ID(val)
Definition: snn_definitions.h:189
GlobalNetworkConfig_s::timeStep
float timeStep
inverse of simNumStepsPerMs
Definition: snn_datastructures.h:616
GroupConfigRT_s::STP_U
float STP_U
published by GroupConfig
Definition: snn_datastructures.h:404
DEVELOPER
@ DEVELOPER
Developer mode, for developing and debugging code.
Definition: carlsim_datastructures.h:92
RuntimeData_s::grpACh
float * grpACh
Definition: snn_datastructures.h:573
SNN::setSTP
void setSTP(int grpId, bool isSet, float STP_U, float STP_tau_u, float STP_tau_x)
Sets STP params U, tau_u, and tau_x of a neuron group (pre-synaptically) CARLsim implements the short...
Definition: snn_manager.cpp:699
SpikeBuffer::SpikeIterator
Iterator to loop over the scheduled spikes at a certain delay.
Definition: spike_buffer.h:97
GroupSTDPInfo_s::GAMMA
float GAMMA
the turn over point if the timing-based E-STDP curve is used
Definition: carlsim_datastructures.h:427
GlobalNetworkConfig_s::numComp
int numComp
number of compartmental neurons
Definition: snn_datastructures.h:605
GroupConfig_s::grpName
std::string grpName
Definition: snn_datastructures.h:306
GroupConfigRT_s::BETA_LTP
float BETA_LTP
published by GroupConfig
Definition: snn_datastructures.h:418
RadiusRF
A struct to specify the receptive field (RF) radius in 3 dimensions.
Definition: carlsim_datastructures.h:363
MAX_SYN_DELAY
#define MAX_SYN_DELAY
Definition: snn_definitions.h:127
ANY
#define ANY
used for create* method to specify any GPU or a specific GPU
Definition: carlsim_definitions.h:56
TARGET_GABAa
#define TARGET_GABAa
Definition: carlsim_definitions.h:68
GroupConfigRT_s::WithSTP
bool WithSTP
published by GroupConfig
Definition: snn_datastructures.h:389
SNN::getDelayRange
RangeDelay getDelayRange(short int connId)
returns the RangeDelay struct of a connection
Definition: snn_manager.cpp:1794
GET_CONN_SYN_ID
#define GET_CONN_SYN_ID(val)
Definition: snn_definitions.h:190
NetworkConfigRT_s::timeStep
float timeStep
inverse of simNumStepsPerMs
Definition: snn_datastructures.h:692
GroupSTDPInfo_s::WithISTDP
bool WithISTDP
enable I-STDP flag
Definition: carlsim_datastructures.h:414
RadiusRF::radZ
double radZ
Definition: carlsim_datastructures.h:372
GroupConfigRT_s::homeostasisScale
float homeostasisScale
published by GroupConfig
Definition: snn_datastructures.h:427
GroupConfigRT_s::DELTA
float DELTA
published by GroupConfig
Definition: snn_datastructures.h:421
GroupConfigRT_s::avgTimeScale
float avgTimeScale
published by GroupConfig
Definition: snn_datastructures.h:424
GroupConfigRT_s::BETA_LTD
float BETA_LTD
published by GroupConfig
Definition: snn_datastructures.h:419
GroupSTDPInfo_s::WithISTDPcurve
STDPCurve WithISTDPcurve
the I-STDP curve
Definition: carlsim_datastructures.h:418
SNN::updateSpikeMonitor
void updateSpikeMonitor(int grpId=ALL)
copy required spikes from firing buffer to spike buffer
Definition: snn_manager.cpp:6478
RuntimeData_s::lif_vReset
float * lif_vReset
Definition: snn_datastructures.h:484
ConnectConfig_s::connRadius
RadiusRF connRadius
Definition: snn_datastructures.h:127
ConnectConfig_s::conn
ConnectionGeneratorCore * conn
Definition: snn_datastructures.h:132
STDPType
STDPType
STDP flavors.
Definition: carlsim_datastructures.h:160
GroupConfigRT_s::WithISTDP
bool WithISTDP
published by GroupConfig
Definition: snn_datastructures.h:392
RuntimeData_s::cumulativePre
unsigned int * cumulativePre
Definition: snn_datastructures.h:527
SNN::getNumConnections
int getNumConnections()
Definition: snn.h:565
Point3D::x
double x
Definition: linear_algebra.h:85
RuntimeData_s::gGABAb_d
float * gGABAb_d
Definition: snn_datastructures.h:501
RuntimeData_s::allocated
bool allocated
true if all data has been allocated
Definition: snn_datastructures.h:506
Grid3D::offsetY
float offsetY
Definition: carlsim_datastructures.h:549
CONN_ONE_TO_ONE
@ CONN_ONE_TO_ONE
Definition: snn_datastructures.h:74
CPU_CORES
@ CPU_CORES
Definition: carlsim_datastructures.h:148
GroupSTDPInfo_s::ALPHA_PLUS_INB
float ALPHA_PLUS_INB
the amplitude of alpha plus, if the exponential I-STDP curve is used
Definition: carlsim_datastructures.h:425
GroupMonitorCore::pushData
void pushData(int time, float data)
inserts group data (time, value) into the vectors
Definition: group_monitor_core.cpp:110
GroupNeuromodulatorInfo_s::decayDP
float decayDP
decay rate for Dopamine
Definition: carlsim_datastructures.h:448
Point3D::z
double z
Definition: linear_algebra.h:85