Open Madhu-1 opened 6 years ago
Related issue: https://github.com/gluster/glusterd2/issues/1223
@Madhu-1 can this be closed?
Fixed in #1243
Reopening, as I am seeing some more issues with snapshots:
=== RUN TestSnapshot
=== RUN TestSnapshot/Create
=== RUN TestSnapshot/Activate
=== RUN TestSnapshot/List
=== RUN TestSnapshot/Mount
SIGQUIT: quit
PC=0x45f5a1 m=0 sigcode=0
goroutine 0 [idle]:
runtime.futex(0x15a0a60, 0x80, 0x0, 0x0, 0x0, 0xc000000000, 0x0, 0x0, 0x7fff9a3cd710, 0x40c2b2, ...)
/usr/local/go/src/runtime/sys_linux_amd64.s:531 +0x21
runtime.futexsleep(0x15a0a60, 0x100000000, 0xffffffffffffffff)
/usr/local/go/src/runtime/os_linux.go:46 +0x4b
runtime.notesleep(0x15a0a60)
/usr/local/go/src/runtime/lock_futex.go:151 +0xa2
runtime.stopm()
/usr/local/go/src/runtime/proc.go:2016 +0xe3
runtime.findrunnable(0xc00004aa00, 0x0)
/usr/local/go/src/runtime/proc.go:2487 +0x4dc
runtime.schedule()
/usr/local/go/src/runtime/proc.go:2613 +0x13a
runtime.park_m(0xc00024a300)
/usr/local/go/src/runtime/proc.go:2676 +0xae
runtime.mcall(0x0)
/usr/local/go/src/runtime/asm_amd64.s:299 +0x5b
goroutine 1 [chan receive, 7 minutes]:
testing.(*T).Run(0xc00030e400, 0xe1c9db, 0xc, 0xe50b90, 0x48ef01)
/usr/local/go/src/testing/testing.go:879 +0x37a
testing.runTests.func1(0xc00011e000)
/usr/local/go/src/testing/testing.go:1119 +0x78
testing.tRunner(0xc00011e000, 0xc000591da0)
/usr/local/go/src/testing/testing.go:827 +0xbf
testing.runTests(0xc00000c040, 0x15912e0, 0xa, 0xa, 0xe20c5d)
/usr/local/go/src/testing/testing.go:1117 +0x2aa
testing.(*M).Run(0xc00030c400, 0x0)
/usr/local/go/src/testing/testing.go:1034 +0x165
github.com/gluster/glusterd2/e2e.TestMain(0xc00030c400)
/home/mrajanna/workspace/heketi_pr/src/github.com/gluster/glusterd2/e2e/main_test.go:44 +0x20e
main.main()
_testmain.go:58 +0x13d
goroutine 58 [chan receive]:
github.com/gluster/glusterd2/vendor/github.com/coreos/etcd/pkg/logutil.(*MergeLogger).outputLoop(0xc000262140)
/home/mrajanna/workspace/heketi_pr/src/github.com/gluster/glusterd2/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go:174 +0x39e
created by github.com/gluster/glusterd2/vendor/github.com/coreos/etcd/pkg/logutil.NewMergeLogger
/home/mrajanna/workspace/heketi_pr/src/github.com/gluster/glusterd2/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go:92 +0x80
goroutine 89 [chan receive]:
github.com/gluster/glusterd2/vendor/github.com/coreos/etcd/pkg/logutil.(*MergeLogger).outputLoop(0xc00026e2a0)
/home/mrajanna/workspace/heketi_pr/src/github.com/gluster/glusterd2/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go:174 +0x39e
created by github.com/gluster/glusterd2/vendor/github.com/coreos/etcd/pkg/logutil.NewMergeLogger
/home/mrajanna/workspace/heketi_pr/src/github.com/gluster/glusterd2/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go:92 +0x80
goroutine 90 [chan receive]:
github.com/gluster/glusterd2/vendor/github.com/coreos/etcd/pkg/logutil.(*MergeLogger).outputLoop(0xc00026e300)
/home/mrajanna/workspace/heketi_pr/src/github.com/gluster/glusterd2/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go:174 +0x39e
created by github.com/gluster/glusterd2/vendor/github.com/coreos/etcd/pkg/logutil.NewMergeLogger
/home/mrajanna/workspace/heketi_pr/src/github.com/gluster/glusterd2/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go:92 +0x80
goroutine 9 [chan receive, 9 minutes]:
github.com/gluster/glusterd2/glusterd2/daemon.init.0.func1()
/home/mrajanna/workspace/heketi_pr/src/github.com/gluster/glusterd2/glusterd2/daemon/connection.go:72 +0xc6
created by github.com/gluster/glusterd2/glusterd2/daemon.init.0
/home/mrajanna/workspace/heketi_pr/src/github.com/gluster/glusterd2/glusterd2/daemon/connection.go:71 +0x35
goroutine 1087 [syscall, 7 minutes]:
Even if the tests fail, we should unmount the mounted volumes (a cleanup sketch follows the mount list below):
/dev/mapper/patchy_snap_vg_1-brick_lvm on /abc/gd2_func_test/TestSnapshot/bricks9773367171/patchy_snap_mnt type xfs (rw,relatime,seclabel,nouuid,attr2,inode64,sunit=128,swidth=128,noquota)
/dev/mapper/patchy_snap_vg_2-brick_lvm on /abc/gd2_func_test/TestSnapshot/bricks9773367172/patchy_snap_mnt type xfs (rw,relatime,seclabel,nouuid,attr2,inode64,sunit=128,swidth=128,noquota)
/dev/mapper/patchy_snap_vg_3-brick_lvm on /abc/gd2_func_test/TestSnapshot/bricks9773367173/patchy_snap_mnt type xfs (rw,relatime,seclabel,nouuid,attr2,inode64,sunit=128,swidth=128,noquota)
/dev/mapper/patchy_snap_vg_4-brick_lvm on /abc/gd2_func_test/TestSnapshot/bricks9773367174/patchy_snap_mnt type xfs (rw,relatime,seclabel,nouuid,attr2,inode64,sunit=128,swidth=128,noquota)
127.0.0.1:/snaps/snaptest on /abc/gd2_func_test/TestSnapshot/Mount/mnt945030024 type fuse.glusterfs (rw,relatime,user_id=0,group_id=0,default_permissions,allow_other,max_read=131072)
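A minimal sketch of the kind of sweep that could catch these leftovers. The base directory `/abc/gd2_func_test` and the helper name `unmountLeftovers` are assumptions for illustration, not part of the existing e2e suite: it scans `/proc/self/mounts` and unmounts everything under the test base directory.

```go
package e2e

import (
	"bufio"
	"os"
	"strings"
	"syscall"
)

// unmountLeftovers unmounts every mount point found under baseDir.
// baseDir is an assumption; the real suite would derive it from its config.
func unmountLeftovers(baseDir string) error {
	f, err := os.Open("/proc/self/mounts")
	if err != nil {
		return err
	}
	defer f.Close()

	var targets []string
	s := bufio.NewScanner(f)
	for s.Scan() {
		// Each line: "<device> <mountpoint> <fstype> <options> ...".
		fields := strings.Fields(s.Text())
		if len(fields) >= 2 && strings.HasPrefix(fields[1], baseDir) {
			targets = append(targets, fields[1])
		}
	}
	if err := s.Err(); err != nil {
		return err
	}

	// /proc/self/mounts lists mounts in mount order, so walking it in
	// reverse unmounts later (typically nested) mounts first, e.g. the
	// FUSE mount before the brick LVM mounts.
	for i := len(targets) - 1; i >= 0; i-- {
		if err := syscall.Unmount(targets[i], 0); err != nil {
			return err
		}
	}
	return nil
}
```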
Observed behavior
If test cases in the e2e suite fail, we do not clean up the resources, which leaves the volumes mounted.
Expected/desired behavior
Even if the test cases fail, we need to clean up the resources used.
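One possible way to do this, assuming the hypothetical `unmountLeftovers` helper sketched above; the actual glusterd2 e2e harness may organize its teardown differently: run the sweep in TestMain after m.Run() returns, regardless of the test result.

```go
package e2e

import (
	"os"
	"testing"
)

func TestMain(m *testing.M) {
	// Run the suite first, but do not call os.Exit directly on the
	// result: os.Exit would skip any cleanup that follows.
	code := m.Run()

	// Always tear down, even when tests failed, so brick LVM mounts and
	// FUSE mounts from aborted runs do not linger.
	if err := unmountLeftovers("/abc/gd2_func_test"); err != nil {
		// Report but do not mask the test result.
		println("cleanup failed:", err.Error())
	}

	os.Exit(code)
}
```

Note that this only helps when the test binary exits on its own; a timeout or external SIGQUIT (as in the trace above) kills the process before TestMain can resume, so the sweep may also need to run at the start of the next run.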
Logs