SNBQT / Limited-Data-Rolling-Bearing-Fault-Diagnosis-with-Few-shot-Learning

This is the corresponding repository for the paper "Limited Data Rolling Bearing Fault Diagnosis with Few-shot Learning".

Question #2

Closed: gaoyacui closed this issue 2 years ago

gaoyacui commented 3 years ago

When I run experiment AB, I always get the following error.

AB:

is_training = False  # enable or disable model training; if training is enabled, the saved best models will be updated

def EXPAB_train_and_test(exp_name, exps, is_training):
    train_classes = sorted(list(set(data.y_train)))
    train_indices = [np.where(data.y_train == i)[0] for i in train_classes]
    for exp in exps:
        scores_1_shot = []
        scores_5_shot = []
        scores_5_shot_prod = []
        scores_wdcnn = []
        num = int(exp / len(train_classes))
        settings['evaluate_every'] = 300 if exp < 1000 else 600
        print(settings['evaluate_every'])
        for time_idx in range(times):
            seed = int(time_idx / 4) * 10
            np.random.seed(seed)
            print('random seed:', seed)
            print("\n%s-%s" % (exp, time_idx) + '*' * 80)
            settings["save_path"] = "tmp/%s/size_%s/time_%s/" % (exp_name, exp, time_idx)
            data._mkdir(settings["save_path"])

            # per-class train/validation split of the selected samples
            train_idxs = []
            val_idxs = []
            for i, c in enumerate(train_classes):
                select_idx = train_indices[i][np.random.choice(len(train_indices[i]), num, replace=False)]
                split = int(0.6 * num)
                train_idxs.extend(select_idx[:split])
                val_idxs.extend(select_idx[split:])
            X_train, y_train = data.X_train[train_idxs], data.y_train[train_idxs]
            X_val, y_val = data.X_train[val_idxs], data.y_train[val_idxs]

            print(train_idxs[0:10])
            print(val_idxs[0:10])

            # load one-shot model and training
            siamese_net = models.load_siamese_net()
            siamese_loader = siamese.Siamese_Loader(X_train,
                                                    y_train,
                                                    X_val,
                                                    y_val)

            if is_training:
                print(siamese.train_and_test_oneshot(settings, siamese_net, siamese_loader))

            # load wdcnn model and training
            y_train = keras.utils.to_categorical(y_train, data.nclasses)
            y_val = keras.utils.to_categorical(y_val, data.nclasses)
            y_test = keras.utils.to_categorical(data.y_test, data.nclasses)

            earlyStopping = EarlyStopping(monitor='val_loss', patience=20, verbose=0, mode='min')
            # checkpoint
            # filepath = "tmp/weights-best-cnn-{epoch:02d}-{val_acc:.2f}.hdf5"
            filepath = "%sweights-best-10-cnn-low-data.hdf5" % (settings["save_path"])
            checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=0, save_best_only=True, mode='max')
            callbacks_list = [earlyStopping, checkpoint]

            wdcnn_net = models.load_wdcnn_net()
            if is_training:
                wdcnn_net.fit(X_train, y_train,
                              batch_size=32,
                              epochs=300,
                              verbose=0,
                              callbacks=callbacks_list,
                              validation_data=(X_val, y_val))

            # loading best weights and testing
            print("load best weights", settings["save_path"] + settings['save_weights_file'])
            siamese_net.load_weights(settings["save_path"] + settings['save_weights_file'])
            print("load best weights", filepath)
            wdcnn_net.load_weights(filepath)
            for snr in snrs:
                print("\n%s_%s_%s" % (exp, time_idx, snr) + '*' * 80)
                X_test_noise = []
                if snr is not None:
                    for x in data.X_test:
                        X_test_noise.append(utils.noise_rw(x, snr))
                    X_test_noise = np.array(X_test_noise)
                else:
                    X_test_noise = data.X_test

                # test 1_shot and 5_shot
                siamese_loader.set_val(X_test_noise, data.y_test)
                s = 'val'
                preds_5_shot = []
                prods_5_shot = []
                scores = []
                for k in range(5):
                    val_acc, preds, prods = siamese_loader.test_oneshot2(siamese_net,
                                                                         len(siamese_loader.classes[s]),
                                                                         len(siamese_loader.data[s]),
                                                                         verbose=False)
                    # utils.confusion_plot(preds[:, 1], preds[:, 0])
                    print(val_acc, preds.shape, prods.shape)
                    scores.append(val_acc)
                    preds_5_shot.append(preds[:, 1])
                    prods_5_shot.append(prods)
                preds = []
                for line in np.array(preds_5_shot).T:
                    preds.append(np.argmax(np.bincount(line)))  # majority vote over the 5 runs
                # utils.confusion_plot(np.array(preds), data.y_test)
                prod_preds = np.argmax(np.sum(prods_5_shot, axis=0), axis=1).reshape(-1)

                score_5_shot = accuracy_score(data.y_test, np.array(preds)) * 100
                print('5_shot:', score_5_shot)

                score_5_shot_prod = accuracy_score(data.y_test, prod_preds) * 100
                print('5_shot_prod:', score_5_shot_prod)

                scores_1_shot.append(scores[0])
                scores_5_shot.append(score_5_shot)
                scores_5_shot_prod.append(score_5_shot_prod)

                # test wdcnn
                score = wdcnn_net.evaluate(X_test_noise, y_test, verbose=0)[1] * 100
                print('wdcnn:', score)
                scores_wdcnn.append(score)

        a = pd.DataFrame(np.array(scores_1_shot).reshape(-1, len(snrs)))
        a.columns = snrs
        a.to_csv("tmp/%s/size_%s/scores_1_shot.csv" % (exp_name, exp), index=True)

        a = pd.DataFrame(np.array(scores_5_shot).reshape(-1, len(snrs)))
        a.columns = snrs
        a.to_csv("tmp/%s/size_%s/scores_5_shot.csv" % (exp_name, exp), index=True)

        a = pd.DataFrame(np.array(scores_5_shot_prod).reshape(-1, len(snrs)))
        a.columns = snrs
        a.to_csv("tmp/%s/size_%s/scores_5_shot_prod.csv" % (exp_name, exp), index=True)

        a = pd.DataFrame(np.array(scores_wdcnn).reshape(-1, len(snrs)))
        a.columns = snrs
        a.to_csv("tmp/%s/size_%s/scores_wdcnn.csv" % (exp_name, exp), index=True)

EXPAB_train_and_test(exp_name, exps, is_training)

error:

AttributeError                            Traceback (most recent call last)
in <module>
    135
    136
--> 137 EXPAB_train_and_test(exp_name, exps, is_training)
    138

in EXPAB_train_and_test(exp_name, exps, is_training)
     67             # loading best weights and testing
     68             print("load best weights", settings["save_path"] + settings['save_weights_file'])
---> 69             siamese_net.load_weights(settings["save_path"] + settings['save_weights_file'])
     70             print("load best weights", filepath)
     71             wdcnn_net.load_weights(filepath)

D:\anaconda\envs\tensorflow-gpu\lib\site-packages\keras\engine\network.py in load_weights(self, filepath, by_name, skip_mismatch, reshape)
   1164         else:
   1165             saving.load_weights_from_hdf5_group(
-> 1166                 f, self.layers, reshape=reshape)
   1167
   1168     def _updated_config(self):

D:\anaconda\envs\tensorflow-gpu\lib\site-packages\keras\engine\saving.py in load_weights_from_hdf5_group(f, layers, reshape)
   1007     """
   1008     if 'keras_version' in f.attrs:
-> 1009         original_keras_version = f.attrs['keras_version'].decode('utf8')
   1010     else:
   1011         original_keras_version = '1'

AttributeError: 'str' object has no attribute 'decode'

Please advise, thank you
liguge commented 3 years ago

It would be great if there were software that converts Keras models to PyTorch.

LijinxiangBb commented 3 years ago

Hello, do you have the TMP and DataSets files? I can't open the link the author gave me. If you have them, please email me at 1040101322@qq.com

SNBQT commented 3 years ago

> When I run experiment AB, I always get the following error: [...]
> AttributeError: 'str' object has no attribute 'decode'
> Please advise, thank you

This is a Keras package error. Please check the information below:

D:\anaconda\envs\tensorflow-gpu\lib\site-packages\keras\engine\saving.py in load_weights_from_hdf5_group(f, layers, reshape)
   1007     """
   1008     if 'keras_version' in f.attrs:
-> 1009         original_keras_version = f.attrs['keras_version'].decode('utf8')
   1010     else:
   1011         original_keras_version = '1'

AttributeError: 'str' object has no attribute 'decode'
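
For context (not stated in this thread, but a well-known interaction): Keras 2.x's HDF5 weight loader calls .decode('utf8') on file attributes, while h5py 3.x already returns those attributes as str, which produces exactly this AttributeError. A minimal check, with the common workaround noted in a comment:

# keras/engine/saving.py calls f.attrs['keras_version'].decode('utf8').
# With h5py >= 3.0, string attributes come back as str rather than bytes,
# so .decode() raises AttributeError. Check which version is installed:
import h5py
print(h5py.__version__)

# If it prints 3.x, a common workaround is to pin h5py below 3.0:
#   pip install "h5py<3.0"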

SNBQT commented 3 years ago

> It would be great if there were software that converts Keras models to PyTorch.

You can try porting models.py and siamese.py by hand, but it would be hard; see the sketch below.
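
To give a feel for what the manual port involves, here is a minimal sketch (illustrative only; the shapes are placeholders, not taken from models.py) of copying one Keras Dense layer's weights into a torch.nn.Linear. Keras stores a Dense kernel as (in_features, out_features), while PyTorch stores Linear weights as (out_features, in_features), so the kernel must be transposed:

import numpy as np
import torch

# Stand-ins for keras_layer.get_weights() -> [kernel, bias]
keras_kernel = np.random.randn(100, 10).astype(np.float32)  # (in, out)
keras_bias = np.random.randn(10).astype(np.float32)

# Equivalent PyTorch layer; note the transposed weight layout
linear = torch.nn.Linear(in_features=100, out_features=10)
with torch.no_grad():
    linear.weight.copy_(torch.from_numpy(keras_kernel.T))  # (out, in)
    linear.bias.copy_(torch.from_numpy(keras_bias))

Every layer in models.py and siamese.py would need this kind of re-declaration and weight copy, plus care around padding conventions for the convolutional layers, which is why there is no painless automatic route.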

SNBQT commented 3 years ago

> Hello, do you have the TMP and DataSets files? I can't open the link the author gave me. If you have them, please email me at 1040101322@qq.com

DataSets: https://pan.baidu.com/s/1WgJMPSDcipugR1Bh4KRadg (extraction code: 7uu9)
TMP: https://pan.baidu.com/s/1k9xkejB-3YRqDunKA9AUsw (extraction code: htgw)

LijinxiangBb commented 3 years ago

Thank you very much!