Madhur215 / Chatbot-cum-voice-Assistant

An AI chatbot with features like conversation through voice, fetching events from Google Calendar, making notes, or searching a query on Google.
34 stars 18 forks source link

The tensorflow and tflearn versions are clashing — please help #19

Open Madhu-7727 opened 10 months ago

Madhu-7727 commented 10 months ago

C:\Users\madhu\PycharmProjects\pythonProject1\venv\Scripts\python.exe C:\Users\madhu\PycharmProjects\pythonProject1\main.py Traceback (most recent call last): File "C:\Users\madhu\PycharmProjects\pythonProject1\main.py", line 3, in from model import Createmodel File "C:\Users\madhu\PycharmProjects\pythonProject1\model.py", line 2, in import tflearn File "C:\Users\madhu\PycharmProjects\pythonProject1\venv\lib\site-packages\tflearn__init__.py", line 7, in from . import config File "C:\Users\madhu\PycharmProjects\pythonProject1\venv\lib\site-packages\tflearn\config.py", line 5, in from .variables import variables File "C:\Users\madhu\PycharmProjects\pythonProject1\venv\lib\site-packages\tflearn\variables.py", line 11, in def variable(name, shape=None, dtype=tf.float32, initializer=None, AttributeError: module 'tensorflow' has no attribute 'float32'

shubh2moon commented 9 months ago

Use PyTorch instead of tflearn:

import numpy as np
import nltk
import torch
import torch.nn as nn
from nltk.stem.lancaster import LancasterStemmer

class create_model:
    """Feed-forward intent classifier (PyTorch port of the original tflearn model)."""

    def __init__(self, train, output, tags, all_questions_words):
        """Build the network.

        train: list of bag-of-words input vectors (all the same length).
        output: list of one-hot tag vectors, index-aligned with `train`.
        tags: list of tag names, index-aligned with the one-hot columns.
        all_questions_words: stemmed vocabulary used by input_words().
        """
        # Fixed seed so weight initialisation (and hence training) is reproducible.
        torch.manual_seed(42)

        self.tags = tags
        self.words = all_questions_words
        # Two hidden layers of 8 units, mirroring the original tflearn net.
        # NOTE: no Softmax layer here — fit_model() uses CrossEntropyLoss,
        # which applies log-softmax internally. Putting a Softmax inside the
        # network would apply it twice and flatten the gradients. predict_tag()
        # only takes an argmax, which is unchanged by softmax, so predictions
        # are unaffected.
        self.network = nn.Sequential(
            nn.Linear(len(train[0]), 8),
            nn.ReLU(),
            nn.Linear(8, 8),
            nn.ReLU(),
            nn.Linear(8, len(output[0])),
        )
        self.model = self.network

    def fit_model(self, train, output, n=400, batch=8, metric=True):
        """Train for `n` epochs with mini-batches of size `batch`.

        `output` rows are one-hot vectors; they are converted to class
        indices because that is the target format CrossEntropyLoss requires
        (the original code passed 2-D long tensors, which raises at runtime).
        """
        loss_fn = nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(self.model.parameters())
        for epoch in range(n):
            for t in range(0, len(train), batch):
                inputs = torch.tensor(train[t:t + batch], dtype=torch.float)
                # One-hot rows -> 1-D class indices (CrossEntropyLoss contract).
                targets = torch.tensor(output[t:t + batch],
                                       dtype=torch.float).argmax(dim=1)

                # Forward pass on logits.
                output_pred = self.model(inputs)

                # Backpropagate and update.
                loss = loss_fn(output_pred, targets)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                # Optional: show training metrics (first batch of each epoch
                # and every 100 samples thereafter).
                if metric and t % 100 == 0:
                    print(f"Epoch: {epoch}/{n}, Loss: {loss.item()}")

    def input_words(self, sentence):
        """Return the bag-of-words numpy vector for `sentence` over self.words."""
        # Local import: `nltk` itself was missing from the module imports even
        # though word_tokenize is used below (only LancasterStemmer was imported).
        import nltk

        bag_of_words = [0] * len(self.words)
        stemmer = LancasterStemmer()
        sentence_words = [stemmer.stem(w.lower())
                          for w in nltk.word_tokenize(sentence)]

        for s in sentence_words:
            for i, j in enumerate(self.words):
                if j == s:
                    bag_of_words[i] = 1

        return np.array(bag_of_words)

    def predict_tag(self, sentence):
        """Return the predicted tag index (int) for `sentence`."""
        # Bag-of-words -> float tensor with a leading batch dimension.
        input_tensor = torch.tensor(self.input_words(sentence),
                                    dtype=torch.float).unsqueeze(0)

        # argmax over logits equals argmax over softmax probabilities.
        output_pred = self.model(input_tensor)
        return torch.argmax(output_pred, dim=-1).item()

    def get_tags(self):
        """Return the tag list given at construction time."""
        return self.tags
shanesirohi commented 6 months ago

The Python version might be causing this; I recommend using an older Python version.