# Clone repository
git clone https://github.com/MASSIVEMAGNETICS/brain_ai.git
cd brain_ai

# Install base requirements
pip install -r requirements.txt

# Optional: Install GPU support (choose one)
pip install torch       # For PyTorch GPU acceleration
# OR
pip install tensorflow  # For TensorFlow GPU acceleration

from brain_simulation_advanced import MultiCompartmentNeuron
# Create neuron with realistic compartments
neuron = MultiCompartmentNeuron(
    neuron_id=0,
    n_dendrites=5,      # 5 dendritic branches
    n_axon_segments=3   # 3 axon segments
)

# Simulate with synaptic input to dendrite
synaptic_inputs = {neuron.dendrite_ids[0]: 50.0}  # Current injection
for step in range(1000):
    spiked = neuron.update(dt_ms=0.1, synaptic_inputs=synaptic_inputs)
    if spiked:
        print(f"Spike at {step * 0.1} ms!")

from brain_simulation_advanced import STDPSynapse, NeurotransmitterType
# Create synapse with dopamine neurotransmitter
synapse = STDPSynapse(
    pre_neuron_id=0,
    post_neuron_id=1,
    weight=1.0,
    neurotransmitter=NeurotransmitterType.DOPAMINE
)

# Simulate spike-timing-dependent learning
current_time = 0.0
for trial in range(10):
    # Pre spikes before Post → LTP (strengthening)
    synapse.update_stdp(current_time, pre_spiked=True, post_spiked=False)
    synapse.update_stdp(current_time + 5.0, pre_spiked=False, post_spiked=True)
    current_time += 50.0

print(f"Final weight: {synapse.weight:.4f}")

from brain_simulation_advanced import GPUAcceleratedNetwork
# Create network with 100,000 neurons
gpu_net = GPUAcceleratedNetwork(
    n_neurons=100000,
    use_pytorch=True,   # Use PyTorch backend
    device='auto'       # Auto-detect GPU
)

# Create sparse random connectivity (1% connection probability)
gpu_net.create_sparse_connectivity(connection_probability=0.01)

# Run simulation
for step in range(1000):
    spike_mask = gpu_net.simulate_step_gpu(dt_ms=0.1)
    n_spikes = gpu_net.get_spike_count(spike_mask)
    if step % 100 == 0:
        print(f"Step {step}: {n_spikes} spikes")

from brain_simulation_advanced import AttentionMechanism
import numpy as np

# Create attention mechanism for 1000 neurons
attention = AttentionMechanism(n_neurons=1000)

# Simulate baseline neural activity
activity = np.random.rand(1000) * 0.5

# Apply spatial attention to neurons 100-150
attended_neurons = list(range(100, 151))
attention.set_spatial_attention(attended_neurons, gain=3.0)

# Modulate activity
modulated_activity = attention.apply_attention(activity)

# Compare attended vs unattended regions
print(f"Attended region: {modulated_activity[100:151].mean():.3f}")
print(f"Unattended region: {modulated_activity[:100].mean():.3f}")

from brain_simulation_advanced import WorkingMemoryCircuit
# Create working memory with 7 slots (Miller's Law)
wm = WorkingMemoryCircuit(n_memory_units=7)

# Encode items
wm.encode(0, strength=1.0)  # Item 0
wm.encode(2, strength=0.8)  # Item 2
wm.encode(5, strength=0.9)  # Item 5

# Maintain through recurrent activity
for _ in range(100):
    wm.maintain()

# Retrieve items
for i in range(7):
    strength = wm.retrieve(i)
    if strength > 0.01:
        print(f"Item {i}: strength = {strength:.3f}")

from brain_simulation_advanced import ReinforcementLearningModule
# Create RL module
rl = ReinforcementLearningModule(
    n_states=10,
    n_actions=4
)

# Training loop
for episode in range(100):
    state = 0
    for step in range(20):
        # Select action
        action = rl.select_action(state, epsilon=0.1)
        # Environment step (example)
        next_state = (state + 1) % 10
        reward = 1.0 if next_state == 9 else 0.0
        # Learn from experience
        td_error = rl.compute_td_error(state, action, reward, next_state)
        rl.update_q_value(state, action, td_error)
        state = next_state

# Check learned policy
print(f"Q-values for state 0: {rl.q_values[0, :]}")
print(f"Best action: {rl.q_values[0, :].argmax()}")

from brain_simulation_advanced import DecisionMakingNetwork
import numpy as np

# Create decision network (2 options)
decision_net = DecisionMakingNetwork(n_options=2)

# Reset for new decision
decision_net.reset()

# Accumulate evidence
evidence = np.array([0.6, 0.4])  # Option 0 has more evidence
time_ms = 0.0
while not decision_net.decision_made and time_ms < 1000:
    decision_net.accumulate_evidence(evidence, dt_ms=1.0)
    time_ms += 1.0

# Get decision
choice = decision_net.get_decision()
print(f"Decision: Option {choice}")
print(f"Reaction time: {time_ms:.1f} ms")

from brain_simulation_advanced import SensorimotorLoop
import numpy as np

# Create sensorimotor system
loop = SensorimotorLoop()

# Simulate camera input
camera_image = np.random.rand(64, 64)  # 64x64 grayscale image

# Define simple neural processing
def neural_network(sensory_activity):
    # Simple pass-through for demo
    n_motor = loop.motor_interface.n_joints
    return sensory_activity[:n_motor] * 2.0

# Process one sensorimotor step
result = loop.process_sensorimotor_step(camera_image, neural_network)
print(f"Motor commands: {result['motor_commands']}")
print(f"Joint positions: {result['joint_state']['positions']}")

from brain_simulation_advanced import (
    AdvancedBrainSimulation,
    NeurotransmitterType
)
# Create advanced simulation
sim = AdvancedBrainSimulation(use_gpu=False, dt_ms=0.1)

# Add multi-compartment neurons
for i in range(10):
    sim.create_multicompartment_neuron(i, n_dendrites=5)

# Create network with different neurotransmitters
sim.create_stdp_synapse(0, 1, NeurotransmitterType.GLUTAMATE)
sim.create_stdp_synapse(1, 2, NeurotransmitterType.GLUTAMATE)
sim.create_stdp_synapse(2, 3, NeurotransmitterType.DOPAMINE)
sim.create_stdp_synapse(3, 4, NeurotransmitterType.GABA)

# Add cognitive modules
sim.add_cognitive_modules(
    n_memory_units=7,
    n_rl_states=10,
    n_rl_actions=4
)

# Enable embodiment
sim.enable_embodiment()

# Optional: Enable GPU for larger networks
# sim.enable_gpu_acceleration(n_neurons=100000)

# Run simulation
print("Running integrated simulation...")
for step in range(1000):
    stats = sim.simulate_step()
    if step % 100 == 0:
        print(f" Step {step}: {stats['spikes']} spikes")

# Get summary
print("\n" + sim.get_summary())

# Run all 11 comprehensive demos
python advanced_demo.py

# Or run specific demos programmatically
python -c "from advanced_demo import demo_stdp; demo_stdp()"

- Hodgkin-Huxley Dynamics - Realistic action potentials
- Multi-Compartment Neurons - Spatial voltage propagation
- STDP Learning - Spike-timing-dependent plasticity
- Neurotransmitter Systems - 6 different neurotransmitters
- GPU Acceleration - Large-scale parallel simulation
- Attention Mechanism - Top-down modulation
- Working Memory - Persistent activity
- Reinforcement Learning - Dopamine-based learning
- Decision Making - Evidence accumulation
- Sensorimotor Loop - Embodied cognition
- Integrated System - All features together
- README.md - Project overview and quick start
- ADVANCED_DOCUMENTATION.md - Complete API reference
- IMPLEMENTATION_PHASES_2-6.md - Implementation details
- SIMULATION_DOCUMENTATION.md - Basic simulation guide
# Study STDP learning rules
from advanced_demo import demo_stdp
demo_stdp()

# Model realistic neurons
from advanced_demo import demo_multicompartment_neuron
demo_multicompartment_neuron()

# Spiking neural networks with learning
from advanced_demo import demo_reinforcement_learning
demo_reinforcement_learning()

# Sensorimotor control
from advanced_demo import demo_sensorimotor
demo_sensorimotor()

- For small networks (<1000 neurons): Use CPU, multi-compartment OK
- For medium networks (1K-100K): Enable GPU acceleration
- For large networks (>100K): GPU required, use sparse connectivity
- For learning: Use STDP for temporal precision, Hebbian for simplicity
- For embodiment: Process sensory input in batches
GPU not detected?
import torch
print(torch.cuda.is_available())  # Should be True

Out of memory?
- Reduce number of neurons
- Decrease connection probability
- Use sparse connectivity
- Simplify neuron model
Slow performance?
- Enable GPU acceleration
- Reduce compartments per neuron
- Use vectorized operations
- Batch process inputs
- Read the ADVANCED_DOCUMENTATION.md for complete API details
- Explore the demos in advanced_demo.py
- Build your own cognitive architecture
- Experiment with different neurotransmitters
- Try GPU acceleration on large networks
- Documentation: See ADVANCED_DOCUMENTATION.md
- Examples: Check advanced_demo.py
- Issues: Open a GitHub issue
- Questions: Refer to inline code documentation
Happy simulating! 🧠✨