Closed jerrycraft2 closed 6 years ago
Hello, I've edited your question. As for your question, I feel a little context must be had. I originally uploaded this example as part of teaching material, hence it's not complete - the students are supposed to complete it.
But here's a hint: You can actually create a new *tapeMachine
, use m.g.SubgraphRoots(m.out)
to create a new graph for the new tape machine. Then you can use gorgonia.Read
to read m.out
out into a Value
, which can be used for printing or sending to google sheets as a proto tensorboard.
As for saving/loading your data: you should serialize m.w0.Value().(*tensor.Dense)
for w0
, w1
until w4
. The current version of Gorgonia only supports Gob encoding, but in the upcoming version, protobuf and flatbufs are supported as well.
btw, if you email me your attempt at answering the question, I will tell you whether you are on the right path. Or if you are close, I'll share my canonical answer
Dear Chewxy,
Thank you for your help. I added the code (in red). I am sure I am not going in the right direction, because I get this error.
It gives this error: read ÷ false(%3c, %3f) :: Matrix float64 into
0xc04210c9c0 ::
func main() { flag.Parse() parseDtype() rand.Seed(1337)
// intercept Ctrl+C sigChan := make(chan os.Signal, 1) signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) doneChan := make(chan bool, 1)
var inputs, targets tensor.Tensor var err error
go func() { log.Println(http.ListenAndServe("localhost:6060", nil)) }()
trainOn := *dataset if inputs, targets, err = mnist.Load(trainOn, loc, dt); err != nil { log.Fatal(err) }
// the data is in (numExamples, 784). // In order to use a convnet, we need to massage the data // into this format (batchsize, numberOfChannels, height, width). // // This translates into (numExamples, 1, 28, 28). // // This is because the convolution operators actually understand height and width. // // The 1 indicates that there is only one channel (MNIST data is black and white). numExamples := inputs.Shape()[0] bs := *batchsize // todo - check bs not 0
if err := inputs.Reshape(numExamples, 1, 28, 28); err != nil { log.Fatal(err) } g := gorgonia.NewGraph() x := gorgonia.NewTensor(g, dt, 4, gorgonia.WithShape(bs, 1, 28, 28), gorgonia.WithName("x")) y := gorgonia.NewMatrix(g, dt, gorgonia.WithShape(bs, 10), gorgonia.WithName("y")) m := newConvNet(g) if err = m.fwd(x); err != nil { log.Fatalf("%+v", err) } losses := gorgonia.Must(gorgonia.HadamardProd(m.out, y)) cost := gorgonia.Must(gorgonia.Mean(losses)) cost = gorgonia.Must(gorgonia.Neg(cost))
// we wanna track costs var costVal gorgonia.Value gorgonia.Read(cost, &costVal)
if _, err = gorgonia.Grad(cost, m.learnables()...); err != nil { log.Fatal(err) }
// debug // ioutil.WriteFile("fullGraph.dot", []byte(g.ToDot()), 0644) // prog, _, _ := gorgonia.Compile(g) // log.Printf("%v", prog) // logger := log.New(os.Stderr, "", 0) // vm := gorgonia.NewTapeMachine(g, gorgonia.BindDualValues(m.learnables()...), gorgonia.WithLogger(logger), gorgonia.WithWatchlist())
vm := gorgonia.NewTapeMachine(g, gorgonia.BindDualValues(m.learnables()...)) solver := gorgonia.NewRMSPropSolver(gorgonia.WithBatchSize(float64(bs)))
// pprof // handlePprof(sigChan, doneChan)
var profiling bool if *cpuprofile != "" { f, err := os.Create(*cpuprofile) if err != nil { log.Fatal(err) } profiling = true pprof.StartCPUProfile(f) defer pprof.StopCPUProfile() } go cleanup(sigChan, doneChan, profiling)
batches := numExamples / bs log.Printf("Batches %d", batches) bar := pb.New(batches) bar.SetRefreshRate(time.Second) bar.SetMaxWidth(80)
for i := 0; i < *epochs; i++ { bar.Prefix(fmt.Sprintf("Epoch %d", i)) bar.Set(0) bar.Start() for b := 0; b < batches; b++ { start := b * bs end := start + bs if start >= numExamples { break } if end > numExamples { end = numExamples }
var xVal, yVal tensor.Tensor if xVal, err = inputs.Slice(sli{start, end}); err != nil { log.Fatal("Unable to slice x") }
if yVal, err = targets.Slice(sli{start, end}); err != nil { log.Fatal("Unable to slice y") } if err = xVal.(*tensor.Dense).Reshape(bs, 1, 28, 28); err != nil { log.Fatalf("Unable to reshape %v", err) }
gorgonia.Let(x, xVal) gorgonia.Let(y, yVal) if err = vm.RunAll(); err != nil { log.Fatalf("Failed at epoch %d: %v", i, err) } solver.Step(m.learnables()) vm.Reset() bar.Increment() } log.Printf("Epoch %d | cost %v", i, costVal)
}
g2 := m.g.SubgraphRoots(m.out) vm2 := gorgonia.NewTapeMachine(g2)
var x2Val tensor.Tensor if x2Val, err = inputs.Slice(sli{0, 100}); err != nil { log.Fatal("Unable to slice x") } if err = x2Val.(*tensor.Dense).Reshape(100, 1, 28, 28); err != nil { log.Fatalf("Unable to reshape %v", err) }
gorgonia.Let(x, x2Val)
if err = vm2.RunAll(); err != nil { fmt.Println(err) } var y2 gorgonia.Value y2Val := gorgonia.Read(m.out, &y2) fmt.Println(y2Val) }
Best Regards Jerry
On Tue, Jul 17, 2018 at 6:45 PM, Chewxy notifications@github.com wrote:
btw, if you email me your attempt at answering the question, I will tell you whether you are on the right path. Or if you are close, I'll share my canonical answer
— You are receiving this because you authored the thread. Reply to this email directly, view it on GitHub https://github.com/gorgonia/gorgonia/issues/220#issuecomment-405540236, or mute the thread https://github.com/notifications/unsubscribe-auth/AeQNAy5Y5nNm6LhU39iilAUtQLnLNGmbks5uHcBPgaJpZM4VSlZx .
// main trains a convnet classifier on MNIST, reporting the cost per
// epoch, then reuses the trained graph to predict on the first 100
// examples via a pruned subgraph rooted at a Read of m.out.
func main() {
	flag.Parse()
	parseDtype()
	rand.Seed(1337)

	// Intercept Ctrl+C so training can be interrupted cleanly.
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
	doneChan := make(chan bool, 1)

	var inputs, targets tensor.Tensor
	var err error

	// Serve pprof endpoints for live profiling.
	go func() {
		log.Println(http.ListenAndServe("localhost:6060", nil))
	}()

	trainOn := *dataset
	if inputs, targets, err = mnist.Load(trainOn, loc, dt); err != nil {
		log.Fatal(err)
	}

	// The data arrives as (numExamples, 784). The convnet needs
	// (batchsize, channels, height, width) = (numExamples, 1, 28, 28):
	// the convolution ops understand height and width, and MNIST is
	// single-channel (black and white).
	numExamples := inputs.Shape()[0]
	bs := *batchsize
	// todo - check bs not 0
	if err := inputs.Reshape(numExamples, 1, 28, 28); err != nil {
		log.Fatal(err)
	}

	g := gorgonia.NewGraph()
	x := gorgonia.NewTensor(g, dt, 4, gorgonia.WithShape(bs, 1, 28, 28), gorgonia.WithName("x"))
	y := gorgonia.NewMatrix(g, dt, gorgonia.WithShape(bs, 10), gorgonia.WithName("y"))
	m := newConvNet(g)
	if err = m.fwd(x); err != nil {
		log.Fatalf("%+v", err)
	}

	// Cost: -mean(m.out ⊙ y).
	losses := gorgonia.Must(gorgonia.HadamardProd(m.out, y))
	cost := gorgonia.Must(gorgonia.Mean(losses))
	cost = gorgonia.Must(gorgonia.Neg(cost))

	// we wanna track costs
	var costVal gorgonia.Value
	gorgonia.Read(cost, &costVal)

	if _, err = gorgonia.Grad(cost, m.learnables()...); err != nil {
		log.Fatal(err)
	}

	// debug
	// ioutil.WriteFile("fullGraph.dot", []byte(g.ToDot()), 0644)
	// prog, _, _ := gorgonia.Compile(g)
	// log.Printf("%v", prog)
	// logger := log.New(os.Stderr, "", 0)
	// vm := gorgonia.NewTapeMachine(g, gorgonia.BindDualValues(m.learnables()...), gorgonia.WithLogger(logger), gorgonia.WithWatchlist())
	vm := gorgonia.NewTapeMachine(g, gorgonia.BindDualValues(m.learnables()...))
	solver := gorgonia.NewRMSPropSolver(gorgonia.WithBatchSize(float64(bs)))

	// pprof
	// handlePprof(sigChan, doneChan)
	var profiling bool
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			log.Fatal(err)
		}
		profiling = true
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}
	go cleanup(sigChan, doneChan, profiling)

	batches := numExamples / bs
	log.Printf("Batches %d", batches)
	bar := pb.New(batches)
	bar.SetRefreshRate(time.Second)
	bar.SetMaxWidth(80)

	for i := 0; i < *epochs; i++ {
		bar.Prefix(fmt.Sprintf("Epoch %d", i))
		bar.Set(0)
		bar.Start()
		for b := 0; b < batches; b++ {
			start := b * bs
			end := start + bs
			if start >= numExamples {
				break
			}
			if end > numExamples {
				end = numExamples
			}
			var xVal, yVal tensor.Tensor
			if xVal, err = inputs.Slice(sli{start, end}); err != nil {
				log.Fatal("Unable to slice x")
			}
			if yVal, err = targets.Slice(sli{start, end}); err != nil {
				log.Fatal("Unable to slice y")
			}
			if err = xVal.(*tensor.Dense).Reshape(bs, 1, 28, 28); err != nil {
				log.Fatalf("Unable to reshape %v", err)
			}
			gorgonia.Let(x, xVal)
			gorgonia.Let(y, yVal)
			if err = vm.RunAll(); err != nil {
				log.Fatalf("Failed at epoch %d: %v", i, err)
			}
			solver.Step(m.learnables())
			vm.Reset()
			bar.Increment()
		}
		log.Printf("Epoch %d | cost %v", i, costVal)
	}

	// Prediction pass.
	//
	// BUG FIX: the Read must be registered BEFORE the subgraph is
	// built, and the subgraph must be rooted at the readNode (y2Val),
	// not at m.out. Otherwise the readNode never makes it into g2,
	// vm2 never executes it, and y2 stays nil — the old code then
	// printed the *Node returned by Read instead of the value.
	var y2 gorgonia.Value
	y2Val := gorgonia.Read(m.out, &y2)
	g2 := m.g.SubgraphRoots(y2Val)
	vm2 := gorgonia.NewTapeMachine(g2)

	var x2Val tensor.Tensor
	if x2Val, err = inputs.Slice(sli{0, 100}); err != nil {
		log.Fatal("Unable to slice x")
	}
	if err = x2Val.(*tensor.Dense).Reshape(100, 1, 28, 28); err != nil {
		log.Fatalf("Unable to reshape %v", err)
	}
	// NOTE(review): x was declared with shape (bs, 1, 28, 28); feeding
	// a (100, 1, 28, 28) tensor assumes bs == 100 — confirm.
	gorgonia.Let(x, x2Val)
	if err = vm2.RunAll(); err != nil {
		fmt.Println(err)
	}
	fmt.Println(y2)
}
Reformatted for better reading. Will reply in a bit
(btw, you ARE on the correct direction)
The fix I would make is this:
// Canonical fix: register the Read FIRST so the readNode exists when the
// subgraph is constructed, then root the subgraph at the readNode (y2Val)
// rather than at m.out. This guarantees vm2 executes the read, so y2 is
// populated after RunAll.
var y2 gorgonia.Value
y2Val := gorgonia.Read(m.out, &y2)
g2 := m.g.SubgraphRoots(y2Val)
vm2 := gorgonia.NewTapeMachine(g2)
var x2Val tensor.Tensor
if x2Val, err = inputs.Slice(sli{0, 100}); err != nil {
log.Fatal("Unable to slice x")
}
// Reshape the 100-example slice into convnet layout (N, C, H, W).
if err = x2Val.(*tensor.Dense).Reshape(100, 1, 28, 28); err != nil {
log.Fatalf("Unable to reshape %v", err)
}
gorgonia.Let(x, x2Val)
if err = vm2.RunAll(); err != nil {
fmt.Println(err)
}
// Print y2 (the Value that Read populated), not the node returned by Read.
fmt.Println(y2)
Read
is one of those heretical "impure" functions that emulates being pure. It communicates with the outside world rather like how IO()
in haskell communicates with the outside world.
Hi Chewxy, It works. Thank you very much.
Regarding serializing m.w0.Value().(*tensor.Dense) for w0, w1 through w4, I have tried the code below (Save and Load). For testing, I load it right before NewTapeMachine. Can you take a look and tell me whether this is the correct way?
And is saving w0, w1, w2, w3 and w4 sufficient to serialize the model for prediction?
Thank you.
// main: same training run as before, extended to exercise weight
// (de)serialization — Load("convnet.gob") restores weights before
// training, Save("convnet.gob") persists them after the prediction pass.
func main() {
flag.Parse()
parseDtype()
rand.Seed(1337)
// intercept Ctrl+C
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
doneChan := make(chan bool, 1)
var inputs, targets tensor.Tensor
var err error
// pprof HTTP endpoint for live profiling
go func() {
log.Println(http.ListenAndServe("localhost:6060", nil))
}()
trainOn := *dataset
if inputs, targets, err = mnist.Load(trainOn, loc, dt); err != nil {
log.Fatal(err)
}
// the data is in (numExamples, 784).
// In order to use a convnet, we need to massage the data
// into this format (batchsize, numberOfChannels, height, width).
//
// This translates into (numExamples, 1, 28, 28).
//
// This is because the convolution operators actually understand height and width.
//
// The 1 indicates that there is only one channel (MNIST data is black and white).
numExamples := inputs.Shape()[0]
bs := *batchsize
// todo - check bs not 0
if err := inputs.Reshape(numExamples, 1, 28, 28); err != nil {
log.Fatal(err)
}
g := gorgonia.NewGraph()
x := gorgonia.NewTensor(g, dt, 4, gorgonia.WithShape(bs, 1, 28, 28), gorgonia.WithName("x"))
y := gorgonia.NewMatrix(g, dt, gorgonia.WithShape(bs, 10), gorgonia.WithName("y"))
// NOTE(review): plain assignment (no :=) — m must be a package-level
// variable so Save/Load below can reach it; confirm its declaration
// exists elsewhere in the file.
m = newConvNet(g)
if err = m.fwd(x); err != nil {
log.Fatalf("%+v", err)
}
// cost = -mean(m.out ⊙ y)
losses := gorgonia.Must(gorgonia.HadamardProd(m.out, y))
cost := gorgonia.Must(gorgonia.Mean(losses))
cost = gorgonia.Must(gorgonia.Neg(cost))
// we wanna track costs
var costVal gorgonia.Value
gorgonia.Read(cost, &costVal)
if _, err = gorgonia.Grad(cost, m.learnables()...); err != nil {
log.Fatal(err)
}
// debug
// ioutil.WriteFile("fullGraph.dot", []byte(g.ToDot()), 0644)
// prog, _, _ := gorgonia.Compile(g)
// log.Printf("%v", prog)
// logger := log.New(os.Stderr, "", 0)
// vm := gorgonia.NewTapeMachine(g, gorgonia.BindDualValues(m.learnables()...), gorgonia.WithLogger(logger), gorgonia.WithWatchlist())
// Restore previously saved weights before the tape machine is built.
// NOTE(review): the error return is ignored — on a fresh run with no
// convnet.gob the model silently trains from scratch; confirm that is
// the intent.
Load("convnet.gob")
vm := gorgonia.NewTapeMachine(g, gorgonia.BindDualValues(m.learnables()...))
solver := gorgonia.NewRMSPropSolver(gorgonia.WithBatchSize(float64(bs)))
// pprof
// handlePprof(sigChan, doneChan)
var profiling bool
if *cpuprofile != "" {
f, err := os.Create(*cpuprofile)
if err != nil {
log.Fatal(err)
}
profiling = true
pprof.StartCPUProfile(f)
defer pprof.StopCPUProfile()
}
go cleanup(sigChan, doneChan, profiling)
batches := numExamples / bs
log.Printf("Batches %d", batches)
bar := pb.New(batches)
bar.SetRefreshRate(time.Second)
bar.SetMaxWidth(80)
for i := 0; i < *epochs; i++ {
bar.Prefix(fmt.Sprintf("Epoch %d", i))
bar.Set(0)
bar.Start()
for b := 0; b < batches; b++ {
start := b * bs
end := start + bs
if start >= numExamples {
break
}
if end > numExamples {
end = numExamples
}
var xVal, yVal tensor.Tensor
if xVal, err = inputs.Slice(sli{start, end}); err != nil {
log.Fatal("Unable to slice x")
}
if yVal, err = targets.Slice(sli{start, end}); err != nil {
log.Fatal("Unable to slice y")
}
if err = xVal.(*tensor.Dense).Reshape(bs, 1, 28, 28); err != nil {
log.Fatalf("Unable to reshape %v", err)
}
gorgonia.Let(x, xVal)
gorgonia.Let(y, yVal)
if err = vm.RunAll(); err != nil {
log.Fatalf("Failed at epoch %d: %v", i, err)
}
solver.Step(m.learnables())
vm.Reset()
bar.Increment()
}
log.Printf("Epoch %d | cost %v", i, costVal)
}
// Prediction pass: Read registered before the subgraph is built, and
// the subgraph rooted at the readNode, per the fix above.
var y2 gorgonia.Value
y2Val := gorgonia.Read(m.out, &y2)
g2 := m.g.SubgraphRoots(y2Val)
vm2 := gorgonia.NewTapeMachine(g2)
var x2Val tensor.Tensor
if x2Val, err = inputs.Slice(sli{0, 100}); err != nil {
log.Fatal("Unable to slice x")
}
if err = x2Val.(*tensor.Dense).Reshape(100, 1, 28, 28); err != nil {
log.Fatalf("Unable to reshape %v", err)
}
gorgonia.Let(x, x2Val)
if err = vm2.RunAll(); err != nil {
fmt.Println(err)
}
// fmt.Println(y2.Shape())
// Print the prediction matrix, 10 values per row.
// NOTE(review): assumes dt is float64; also fmt.Print(v) emits no
// separator, so values within a row run together — consider
// fmt.Printf("%v ", v).
p := y2.Data().([]float64)
s := 0
for _, v := range p {
fmt.Print(v)
s++
if s >= 10 {
fmt.Println("")
s = 0
}
}
// Persist the trained weights for later runs.
Save("convnet.gob")
}
// Save serializes the learnable weights of the package-level model m
// (w0 through w4, in that fixed order) to filename using gob encoding.
// Load must decode in the same order. Any error is printed and returned.
func Save(filename string) (err error) {
	var f io.WriteCloser
	if f, err = os.OpenFile(filename, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644); err != nil {
		fmt.Println(err)
		return
	}
	// BUG FIX: the original closed f only on the success path, leaking
	// the file handle whenever an Encode failed.
	defer f.Close()
	encoder := gob.NewEncoder(f)
	if err = encoder.Encode(m.w0.Value().(*tensor.Dense)); err != nil {
		fmt.Println(err)
		return
	}
	if err = encoder.Encode(m.w1.Value().(*tensor.Dense)); err != nil {
		fmt.Println(err)
		return
	}
	if err = encoder.Encode(m.w2.Value().(*tensor.Dense)); err != nil {
		fmt.Println(err)
		return
	}
	if err = encoder.Encode(m.w3.Value().(*tensor.Dense)); err != nil {
		fmt.Println(err)
		return
	}
	if err = encoder.Encode(m.w4.Value().(*tensor.Dense)); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("Saved")
	return
}
// Load restores the model weights previously written by Save from
// filename, decoding in the same fixed order (w0 through w4) and
// binding each tensor to the corresponding node of the package-level
// model m via gorgonia.Let.
func Load(filename string) (err error) {
	var f io.ReadCloser
	if f, err = os.Open(filename); err != nil {
		return
	}
	// BUG FIX: the original closed f only on the success path, leaking
	// the file handle whenever a Decode failed.
	defer f.Close()
	decoder := gob.NewDecoder(f)
	var wt0, wt1, wt2, wt3, wt4 *tensor.Dense
	if err = decoder.Decode(&wt0); err != nil {
		return
	}
	gorgonia.Let(m.w0, wt0)
	if err = decoder.Decode(&wt1); err != nil {
		return
	}
	gorgonia.Let(m.w1, wt1)
	if err = decoder.Decode(&wt2); err != nil {
		return
	}
	gorgonia.Let(m.w2, wt2)
	if err = decoder.Decode(&wt3); err != nil {
		return
	}
	gorgonia.Let(m.w3, wt3)
	// BUG FIX: Save encodes five tensors (w0..w4) but the original Load
	// stopped at w3, leaving w4 at its random initialization.
	if err = decoder.Decode(&wt4); err != nil {
		return
	}
	gorgonia.Let(m.w4, wt4)
	fmt.Println("Loaded")
	return
}
Yes. You will need to write a new function/method to load the serialized value into the neural network (use gorgonia.WithValue
) ...
Actually, after formatting your code, yeah, your method works too.
Dear Chewxy, Thank you very much for your help. Really appreciate it.
Hi,
I have tried the example code CONVNET. I am able to run it. After the program finishes the training, is there a way to predict immediately?
I have tried for few days without any luck. I could not find any sample code which is doing prediction after Training. Or saving and load the result after Training. Hope you can help me.