ValueError: Failed to convert a NumPy array to a Tensor (Unsupported object type numpy.ndarray) while trying to predict Tesla stock


This worked: tf.convert_to_tensor(y)
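
For context, this error usually means the object handed to model.fit could not be turned into a regular numeric tensor, for example a plain Python list or an object-dtype array built from pieces of unequal length. A minimal sketch of the reported fix, assuming train_x, train_y, and model are the ones produced by the code in the question:

    import numpy as np
    import tensorflow as tf

    # Features: make sure this is a regular numeric array of shape (samples, timesteps, features)
    train_x = np.asarray(train_x).astype("float32")

    # Labels: converting the Python list of 0/1 targets to a tensor is the fix reported above
    train_y = tf.convert_to_tensor(train_y)

    model.fit(train_x, train_y, batch_size=64, epochs=7)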

Author: manan goyal

Updated on July 27, 2022

Comments

  • manan goyal, almost 2 years ago

    At the end of the code you can see that I have tried converting the data into NumPy arrays, but I don't understand why TensorFlow doesn't accept them. I have looked at the other related pages but none seemed to help. Is there some other format I have to convert the data to in order to properly fit the model?

    This is what the Keras documentation says about the x and y arguments to fit:

    x: Vector, matrix, or array of training data (or list if the model has multiple inputs). If all inputs in the model are named, you can also pass a list mapping input names to data. x can be NULL (default) if feeding from framework-native tensors (e.g. TensorFlow data tensors).

    y: Vector, matrix, or array of target (label) data (or list if the model has multiple outputs). If all outputs in the model are named, you can also pass a list mapping output names to data. y can be NULL (default) if feeding from framework-native tensors (e.g. TensorFlow data tensors).
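
    In other words, fit() expects homogeneous numeric arrays whose first axis is the sample axis. For comparison, here is a minimal, self-contained sketch (toy random data; the shapes only mirror the 100-step, 2-feature windows built in the code below) that fits without any conversion error:

    import numpy as np
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import LSTM, Dense

    # Toy data: 32 samples, 100 timesteps, 2 features; integer 0/1 targets
    x = np.random.rand(32, 100, 2).astype("float32")
    y = np.random.randint(0, 2, size=(32,))

    toy_model = Sequential([
        LSTM(8, input_shape=(100, 2)),
        Dense(2, activation="softmax"),
    ])
    toy_model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
    toy_model.fit(x, y, epochs=1)  # succeeds because x and y are regular numeric arrays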

    import pandas as pd
    from sklearn import preprocessing
    from collections import deque
    import numpy as np
    import random as rd
    import time
    import tensorflow as tf
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense, Dropout, LSTM, BatchNormalization
    
    
    
    data = pd.read_csv("TSLA.csv")
    
    data.set_index("Date", inplace=True)
    data = data[["Close", "Volume"]]
    
    Back_period_history = 100  # length of each input window (rows of price history per sample)
    Future_predict = 10        # how many rows ahead the target price is compared against
    
    
    def classify(current, future):
        # label 1 when the close Future_predict rows ahead is higher than the current close, else 0
        if float(future) > float(current):
            return 1
        else:
            return 0
    
    
    data["future"] = data["Close"].shift(-Future_predict)
    data["target"] = list(map(classify, data["Close"], data["future"]))
    
    
    #print(data.head(20))
    
    times = sorted(data.index.values)
    last_10pct = times[-int(0.1*len(times))]  # date marking the start of the final 10% of the data, used for validation
    
    validation_data = data[(data.index >= last_10pct)]
    data = data[(data.index < last_10pct)]
    
    def preproccesing(data):
        data = data.drop("future", axis=1)  # the helper column is only needed to build the target
    
        for col in data.columns:
            if col != "target":
                data[col] = data[col].pct_change()
                data.dropna(inplace=True)
                data[col] = preprocessing.scale(data[col].values)
        data.dropna(inplace=True)

        # build sliding windows of the last Back_period_history rows;
        # this block now runs once, after every feature column has been scaled
        sequential_data = []
        prev_days = deque(maxlen=Back_period_history)
        for i in data.values:
            prev_days.append([n for n in i[:-1]])
            if len(prev_days) == Back_period_history:
                sequential_data.append([np.array(prev_days), i[-1]])

        rd.shuffle(sequential_data)

        # balance the dataset so it holds as many buys as sells
        buys = []
        sells = []

        for seq, target in sequential_data:
            if target == 0:
                sells.append([seq, target])
            elif target == 1:
                buys.append([seq, target])

        rd.shuffle(buys)
        rd.shuffle(sells)

        lower = min(len(buys), len(sells))

        buys = buys[:lower]
        sells = sells[:lower]

        sequential_data = buys + sells

        rd.shuffle(sequential_data)

        X = []
        y = []

        for seq, target in sequential_data:
            # append each window individually; appending the whole sequential_data
            # list makes X a ragged object array, which is exactly what TensorFlow
            # refuses to convert to a tensor
            X.append(seq)
            y.append(target)

        return np.array(X), np.array(y)
    
    
    train_x, train_y = preproccesing(data)
    validation_x, validation_y = preproccesing(validation_data)
    
    model = Sequential()
    
    model.add(LSTM(
        128, input_shape = (train_x.shape[1:]), activation = "relu", return_sequences = True
    ))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())
    
    model.add(LSTM(
        128, input_shape = (train_x.shape[1:]), activation = "relu", return_sequences = True
    ))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())
    
    model.add(LSTM(
        128, activation = "relu", return_sequences = False  # the last LSTM must return one vector per sample for the Dense layers
    ))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())
    
    model.add(Dense(32, activation = "relu"))
    model.add(Dropout(0.2))
    
    model.add(Dense(2, activation = "softmax"))
    
    opt = tf.keras.optimizers.Adam()
    
    model.compile(loss="sparse_categorical_crossentropy", optimizer=opt, metrics=["accuracy"])  # crossentropy matches the integer 0/1 targets and the 2-unit softmax; "mse" does not
    
    train_x = np.asarray(train_x)
    train_y = np.asarray(train_y)
    validation_x = np.asarray(validation_x)
    validation_y = np.asarray(validation_y)
    
    history = model.fit(train_x, train_y, batch_size = 64, epochs = 7, validation_data = (validation_x, validation_y))
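
    If the error still shows up, a quick diagnostic (a sketch reusing train_x and train_y from the code above) is to print the dtype of whatever is about to be passed to fit:

    print(np.asarray(train_x).dtype, np.asarray(train_x).shape)
    print(np.asarray(train_y).dtype, np.asarray(train_y).shape)
    # dtype('O') (object) on either line means the array is ragged/nested and will raise
    # "Failed to convert a NumPy array to a Tensor (Unsupported object type numpy.ndarray)"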