Closed hunse closed 2 years ago
What is the process to have this merged into the master
branch and reflected in the next release? For now we are using the fix-memory-leak
branch in our code.
Hi @kshivvy. We're planning on doing a release for the new version of NxSDK within the next month or so. We'll merge this into master and release it as part of that release.
Ok, fixed one last leak with the nodes in nengo_loihi.builder.inputs
not being garbage collected because they were using self.update
as their output function, creating a circular reference. This was a pretty significant leak, and I'm not seeing any more leaks now, so this is ready to go.
For the record, here's the script that I was using to do the memory profiling:
import gc
import weakref
import nengo
import numpy as np
import nengo_loihi
# Select the memory profiler: tracemalloc (stdlib) or guppy's heapy.
use_tracemalloc = False
# use_tracemalloc = True

if use_tracemalloc:
    import tracemalloc

    # Start tracing with a 25-frame traceback limit.
    # BUG FIX: the original called the bare tracemalloc.start() first, which
    # fixed the frame limit at the default of 1; a subsequent start(25) is a
    # no-op while tracing is already active, so traceback diffs were truncated.
    tracemalloc.start(25)
else:
    from guppy import hpy

    h = hpy()
def snapshot():
    """Take a memory snapshot using whichever profiler is active."""
    return tracemalloc.take_snapshot() if use_tracemalloc else h.heap()
def print_snapshot(snap):
    """Print a snapshot or snapshot diff from either profiler.

    For a tracemalloc diff (a non-empty list of ``StatisticDiff``), show the
    top 20 statistics, each followed by at most the first seven lines of its
    traceback. Anything else is printed directly.
    """
    if not use_tracemalloc:
        print(snap)
        return

    is_tracemalloc_diff = (
        isinstance(snap, list)
        and len(snap) > 0
        and isinstance(snap[0], tracemalloc.StatisticDiff)
    )
    if not is_tracemalloc_diff:
        print(snap)
        return

    for stat in snap[:20]:
        print(stat)
        for i, line in enumerate(stat.traceback.format()):
            print(line)
            if i > 5:  # cap traceback output at 7 lines per statistic
                break
def snapshot_diff(snap0, snap1):
    """Return the change from ``snap0`` to ``snap1`` in profiler-native form."""
    return snap1.compare_to(snap0, "traceback") if use_tracemalloc else snap1 - snap0
class NewClass:
    """Build a small nengo network and repeatedly simulate it while taking
    heap snapshots, to expose memory leaks across simulator resets."""

    def __init__(self):
        self.input_size = 10
        self.n_neurons = 500
        self.initialize_nengo()

    def initialize_nengo(self):
        """Construct the test network: input node -> ensemble -> output node."""
        net = nengo.Network()

        with net:

            def input_func(t):
                # Constant drive into the ensemble's neurons.
                return np.ones(10)

            def output_func(t, x):
                # Stash the latest neuron activities on the instance; the
                # closure over ``self`` is what previously caused a cycle.
                self.output = x

            in_node = nengo.Node(
                output=input_func, size_in=0, size_out=self.input_size
            )
            ens = nengo.Ensemble(n_neurons=self.n_neurons, dimensions=1)
            out_node = nengo.Node(
                output=output_func, size_in=self.n_neurons, size_out=0
            )
            nengo.Connection(
                in_node,
                ens.neurons,
                transform=np.ones((self.n_neurons, self.input_size)),
            )
            nengo.Connection(ens.neurons, out_node)

        self.network = net

    def run(self, steps, num_resets):
        """Simulate the network ``num_resets`` times, printing memory growth.

        Snapshots are taken before the first reset, on every 25th reset, and
        on the last one; diffs are printed against the pre-run baseline and,
        at the end, against the first reset's snapshot.
        """
        snap_pre = snapshot()
        print_snapshot(snap_pre)

        snap0 = None
        snapi = None
        for i in range(num_resets):
            with nengo_loihi.Simulator(self.network, precompute=True) as sim:
                sim.run_steps(steps)
            del sim

            if i % 25 == 0 or i == num_resets - 1:
                print("finished iteration:", i)
                gc.collect()
                snapi = snapshot()
                if i == 0:
                    snap0 = snapi
                # Growth relative to the pre-run baseline.
                print_snapshot(snapshot_diff(snap_pre, snapi))

        if snapi is not None and snap0 is not None and snapi is not snap0:
            print("Dynamic allocation (diff between first reset and n-th reset)")
            print_snapshot(snapshot_diff(snap0, snapi))
# --- Script driver --------------------------------------------------------
# Other values used while profiling: steps in {10, 100};
# num_resets in {1, 3, 10, 51, 101}.
steps = 1000
num_resets = 26

nengo_class = NewClass()
nengo_class.run(steps, num_resets)
To address #311.
TODO: check Probe.target to see if there's a similar memory leak there.
Test script: see above. The script can also be run with run_steps
commented out, to test for builder memory leaks when run isn't called
(there's still a leak in that case, but it's much smaller).