network.h
/***************************************************************************
                          network.h  -  description
                             -------------------
    begin                : Thu Dec 13 2001
    copyright            : (C) 2001-2005 by Matt Grover, Rudiger Koch
    email                : mgrover@amygdala.org
 ***************************************************************************/

/***************************************************************************
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify *
 *   it under the terms of the GNU General Public License as published by *
 *   the Free Software Foundation; either version 2 of the License, or    *
 *   (at your option) any later version.                                  *
 *                                                                         *
 ***************************************************************************/

#ifndef NETWORK_H
#define NETWORK_H

#include <pthread.h>
#include <map>
#include <vector>
#include <queue>
#include <string>
#include <stdio.h>

#include <amygdala/dendrite.h>
#include <amygdala/topology.h>
#include <amygdala/axon.h>
#include <amygdala/amygdalaclass.h>

namespace Amygdala {

using std::map;
using std::string;
using std::queue;
using std::exception;

class Neuron;
class SpikingNeuron;
class InputNeuron;
class SpikeInput;
class AxonNode;
class Synapse;
class FunctionLookup;
class Trainer;
class VisualStub;

struct NeuronTopologyPtr {
    Neuron* neuron;
    Topology* topology;
};

/** @class Network network.h amygdala/network.h
 *  @brief Manages the simulation engine.
 *
 *  This is a singleton class that controls the simulation.
 *  Network acts as the master container for the SNN and has
 *  methods for starting and stopping the simulation.
 *  A Network object is created at startup of the program and
 *  other Network objects cannot be created. To get a pointer to
 *  the Network object, use Network::GetNetworkRef().
 *  @author Matt Grover <mgrover@amygdala.org>
 *  @author Rudiger Koch <rkoch@rkoch.org>
 */
class Network: public AmygdalaClass {
private:
    /** Users never create a Network object themselves. See the class description. */
    Network();
    Network(Network&);
    Network(int& argc, char* argv[]);

    /** This function speeds things up for very small NNs. */
    void SimpleRun();

    /** A thread goes to sleep here. The last thread cannot go to sleep;
     *  if we are the last one, false is returned. */
    bool ThreadSleep(unsigned int simTime);

    /** Wake up all sleeping network threads. */
    void ThreadWakeUp();

    /** Turn on listening on a port for incoming spikes.
     *  @param port The UDP port to listen on. */
    void UdpListener(int port);

    static void ShiftArgs(int pos, int& argc, char* argv[]);

    /** Schedule a call to SpikingNeuron::ProcessInput() for a neuron
     *  during the current time step. Used to identify Neurons that have
     *  queued input that must be processed. This should only be called
     *  from Dendrite::SetNrnTrigger().
     *  @param nrn The Neuron requesting processing. */
    void ScheduleNeuronProcess(SpikingNeuron* nrn) { processQ.push_back(nrn); }

    /** Increment the offset index used for delayedSpikeQ. This is used as
     *  part of the delayed spike scheduler. */
    inline void IncrementDelayOffset();

    /** Send spikes to neurons through the synapses stored in
     *  delayedSpikeQ for the current time step. */
    void SendDelayedSpikes();

    /** Initialize the delayed spike queue. */
    void InitializeDelayedSpikeQ();

    /** Set the maximum spike delay value that the delayed spike queue
     *  will handle.
     *  @param _delay The delay value. */
    void SetMaxSpikeDelay(AmTimeInt _delay);

    /** Add a neuron to the network. This is called automatically when
     *  neurons are added to a topology. It should not be called outside
     *  of that process. */
    void AddNeuron(Neuron* nrn, Topology* top);

    /** Add a topology object to the network. This is called automatically
     *  whenever a topology object is created. It should not be called
     *  outside of that process. */
    void AddTopology(Topology* top);

public:
    ~Network();

    /** Add a SpikeInput object to the network. This is called automatically
     *  when SpikeInput objects are created. */
    void AddSpikeInput(SpikeInput* si);

    /** Run the simulation for a period of time.
     *  @param maxRunTime The maximum time the simulation will run. */
    void Run(AmTimeInt maxRunTime);

    /** Stop the network before maxRunTime is reached. */
    void Stop();

    /** @return A pointer to the Network object. */
    static Network* GetNetworkRef();

    unsigned int GetMaxRunTime();

    /** Save to a file. ".amg" is appended to the file name if it does not
     *  already end in ".amg". A gzipped file is written for each
     *  NetworkPartition and all gzipped files are tarred together.
     *  @throw runtime_error If the file or the temporary files cannot be
     *  opened, the disk runs out of space, or a Network is running.
     *  @see NetLoader::SaveXML() */
    void Save(std::string filename, bool compress=true);

    /** Load an Amygdala network.
     *  @see NetLoader::LoadXML() */
    void Load(std::string filename, bool loadPhysProps=true);

    /** Set the size of the simulation time steps. This must be set before
     *  Neurons are added.
     *  @param stepSize Time step size in microseconds. The default is
     *  100 microseconds. */
    static void SetTimeStepSize(AmTimeInt stepSize) { simStepSize = stepSize; }

    /** @return The size of the time step in microseconds. */
    static AmTimeInt TimeStepSize() { return simStepSize; }

    /** The thread scheduler for the network. For internal use only;
     *  use Network::Run() instead.
     *  @see Network::Run() */
    void Schedule();

    /** @return The current simulation time in microseconds. */
    static AmTimeInt SimTime() { return simTime; }

    /** Set the number of threads to run. This function must not be called
     *  on a running simulation. Performance suffers badly if this is set to
     *  a number larger than the number of CPUs on the system.
     *  @param threads The number of threads to run. */
    void SetNumThreads(unsigned int threads);

    /** Initialize the network and create the Network object. This must be
     *  done before anything else is done with the network.
     *  @param argc argc from main()
     *  @param argv argv from main() */
    static void Init(int& argc, char* argv[]);

    /** Clean up the network after processing has completed. This will stop
     *  all threads and free the memory. */
    static void Cleanup();

    /** @return A pointer to the GLDebugger object. */
    VisualStub* GetVisualStub();

    /** @return The largest neuron ID currently in the network. */
    static AmIdInt MaxNeuronId() { return (nextNeuronId - 1); }

    /** @return An unused neuron ID. */
    static AmIdInt GetNewNeuronId() { return nextNeuronId++; }

    /** @return Ptr to the network's FunctionLookup. */
    FunctionLookup* GetFunctionLookup() const { return funcLookup; }

    /************************************************************************
     * Functions from NetworkPartition merge
     ************************************************************************/

    /** Schedule a neuron to send a spike.
     *  @param spikeTime Time that the neuron should spike.
     *  @param nrn The neuron scheduled to spike. */
    void ScheduleSpike(AmTimeInt spikeTime, SpikingNeuron* nrn);

    /** Schedule a neuron to send a spike.
     *  @param spikeTime Time that the neuron should spike.
     *  @param nrn The neuron scheduled to spike. */
    void ScheduleSpike(AmTimeInt spikeTime, InputNeuron* nrn);

    /** Schedule the transmission of a spike down an axon. This may be done
     *  in order to implement spike batching or to model transmission
     *  delays. This is normally called from Neuron.
     *  @param axon The axon vector from a Neuron. A spike will be scheduled
     *  to cross each Synapse after the delay time has passed. Delay times
     *  are stored in Synapse and set when Neurons are connected together.
     *  @see Neuron::SendSpike() */
    void ScheduleSpikeDelay(Axon* axon);

    /** @return Pointer to the SpikeInput object that is being used. */
    SpikeInput* GetSpikeInput(unsigned int idx) { return spikeInputVec[idx]; }

    typedef std::vector<SpikeInput*>::const_iterator spikeinput_iterator;
    spikeinput_iterator SpikeInput_begin() { return spikeInputVec.begin(); }
    spikeinput_iterator SpikeInput_end() { return spikeInputVec.end(); }

    /** Execute exactly one time step. */
    void TimeStep();

    /** Get the Network ready to run. This must be called after the neurons
     *  have been added to the network and before Run() or TimeStep(). */
    void InitRun();

    // TODO: These should probably be moved inside the cpp file so that a
    // check for the neuronId can be done before trying to find it in the
    // map (which will insert an element if the key is not found).

    /** @return Ptr to the Neuron with id neuronId. */
    Neuron* GetNeuron(AmIdInt neuronId) { return nrnMap[neuronId].neuron; }

    /** @return Ptr to the Topology object that contains neuron neuronId. */
    Topology* GetTopology(AmIdInt neuronId) { return nrnMap[neuronId].topology; }

    /** @return Ptr to the Topology object that contains neuron nrn. */
    Topology* GetTopology(Neuron* nrn);

    /** @return Ptr to the Topology object with name topologyName. */
    Topology* GetTopology(std::string& topologyName) { return topologyMap[topologyName]; }

    /** @return NeuronTopologyPtr that contains neuron neuronId. */
    NeuronTopologyPtr& GetNeuronTopology(AmIdInt neuronId) { return nrnMap[neuronId]; }

    /** @return Ptr to the Trainer with name trainerName. */
    Trainer* GetTrainer(const std::string& trainerName) { return trainerMap[trainerName]; }

    void AddTrainer(const std::string& trainerName, Trainer* t);
    void SetTrainerCallback(Trainer* t, AmTimeInt callbackTime);

    typedef std::map<std::string, Topology*>::const_iterator const_iterator;
    const_iterator begin() const { return topologyMap.begin(); }
    const_iterator end() const { return topologyMap.end(); }

    typedef std::map<std::string, Trainer*>::const_iterator const_iterator_trainer;
    const_iterator_trainer trainer_begin() const { return trainerMap.begin(); }
    const_iterator_trainer trainer_end() const { return trainerMap.end(); }

protected:
    // Protected attributes

    /** The one and only Network object. */
    static Network* theNetwork;

    /** The maximum time this network will run. */
    AmTimeInt maxRunTime;

    FunctionLookup* funcLookup;

    /** SpikeRequest is used to keep track of scheduled spikes in the event
     *  queue. The priority_queue from the STL ranks entries based on the
     *  < operator (defined below). The ranking will be in order of
     *  spikeTime, requestTime, and requestOrder. */
    struct SpikeRequest {
        AmTimeInt spikeTime;        // Desired time of spike
        AmTimeInt requestTime;      // Time SpikeRequest was entered in queue
        unsigned int requestOrder;  // Entry number within a given time step
        SpikingNeuron* requestor;   // Neuron scheduling the spike

        // operator< overloaded to make the priority_queue happy
        bool operator<(const SpikeRequest& sr) const {
            if (spikeTime != sr.spikeTime) {
                return spikeTime > sr.spikeTime;
            } else if (requestTime != sr.requestTime) {
                return requestTime > sr.requestTime;
            } else {
                return requestOrder < sr.requestOrder;
            }
        }
    };

    struct InputSpikeRequest {
        AmTimeInt spikeTime;        // Desired time of spike
        AmTimeInt requestTime;      // Time InputSpikeRequest was entered in queue
        unsigned int requestOrder;  // Entry number within a given time step
        InputNeuron* requestor;     // Neuron scheduling the spike

        // operator< overloaded to make the priority_queue happy
        bool operator<(const InputSpikeRequest& sr) const {
            if (spikeTime != sr.spikeTime) {
                return spikeTime > sr.spikeTime;
            } else if (requestTime != sr.requestTime) {
                return requestTime > sr.requestTime;
            } else {
                return requestOrder < sr.requestOrder;
            }
        }
    };

    struct TrainerCallback {
        Trainer* trainer;
        AmTimeInt callbackTime;

        bool operator<(const TrainerCallback& rhs) const {
            // return > instead of < to force the smallest times to the top of the queue
            return callbackTime > rhs.callbackTime;
        }
    };

    std::priority_queue<SpikeRequest> eventQ;            // Main event queue
    std::priority_queue<InputSpikeRequest> inputQ;       // Queue of inputs into the network
    std::priority_queue<TrainerCallback> trainerCallbackQ;
    std::vector< std::vector<AxonNode*> > delayedSpikeQ;
    std::vector<SpikingNeuron*> processQ;                // Queue of neurons that have input
                                                         // processing to do during the
                                                         // current time step
    std::map<AmIdInt, NeuronTopologyPtr> nrnMap;
    std::map<std::string, Topology*> topologyMap;
    std::vector<SpikeInput*> spikeInputVec;
    std::map<std::string, Trainer*> trainerMap;

    AmTimeInt currSpikeDelayOffset;
    AmTimeInt maxOffset;
    AmTimeInt maxSpikeDelay;
    Synapse* maxSpikeDelaySyn;      // TODO: What is this?
    unsigned int spikeBatchCount;
    AmTimeInt nextInputTime;
    unsigned int eventRequestCount; // Counter for SpikeRequest.requestOrder

    /** Reference time for the Network. NetworkPartition::simTime may not
     *  exceed this for more than one time step. */
    static AmTimeInt simTime;

    /** Number of threads that went to sleep after emptying their spike
     *  input buffers. This doesn't mean that such a thread is simply
     *  waiting for the next time step; if more requests become available
     *  through the still-active threads, a sleeping thread must be woken up. */
    int sleepers;

    /** Number of threads. This should be set to a value no greater than the
     *  number of CPUs on the system. The default is 1. */
    unsigned int numThreads;

    pthread_mutex_t mut_sleeper;
    pthread_mutex_t mut_simtime;
    pthread_cond_t cond_sleeper;

    /** Contains all the thread handles. */
    std::vector<pthread_t*> threadHandles;

    bool running;

    /** Pointer to a visualizer object. */
    VisualStub* visualStub;

    static AmTimeInt simStepSize;
    static AmIdInt nextNeuronId;

    friend void Dendrite::SetNrnTrigger();
    friend void Axon::BuildNodes();
    friend class Topology;
    friend class TFactory;
    friend class NetLoader;
};

inline void Network::IncrementDelayOffset()
{
    if (currSpikeDelayOffset >= maxOffset) {
        currSpikeDelayOffset = 0;
    } else {
        ++currSpikeDelayOffset;
    }
}

} // namespace Amygdala

#endif
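/* Usage sketch (not part of the original header): a minimal driver built only
 * from the methods declared above, to show the intended call order for the
 * singleton API. Topology and neuron construction is omitted because it is
 * defined in other Amygdala headers, and the step size and run time below are
 * arbitrary illustration values.
 *
 *   #include <amygdala/network.h>
 *   using namespace Amygdala;
 *
 *   int main(int argc, char* argv[]) {
 *       Network::Init(argc, argv);                // create the one Network object
 *       Network::SetTimeStepSize(100);            // must be set before Neurons are added
 *       Network* net = Network::GetNetworkRef();  // only way to reach the singleton
 *
 *       // ... build Topology objects and add Neurons here (omitted) ...
 *
 *       net->InitRun();                           // required after neurons are added,
 *                                                 // before Run() or TimeStep()
 *       net->Run(100000);                         // simulate 100,000 microseconds
 *       Network::Cleanup();                       // stop threads and free memory
 *       return 0;
 *   }
 */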