class NN:
    """A neural network for cases where the network is not deep and you cannot leverage
    linear algebra as much; however, in return you get greater flexibility in the model."""
    def __init__(self, numInputs=someDefault, numOutputs=someOtherDefault, nodes=someDefaultList):
        self.nodes = nodes
        # not sure how you would designate input nodes or link input to non-randomized values
class Node:
    """This class is for the non-dense neural network, where you need to determine the inputs and whatnot."""
    def __init__(self, preDefinedInputs=None, activationFunction=ReLU):
        # activationFunction is a class
        # randomize inputs if none listed
        self.inputs = preDefinedInputs
        self.activationFunction = activationFunction
from enum import Enum, auto

class activationMethods(Enum):
    RELU = auto()
    # some other ones to be added later (I'm tired)
class ActivationFunction:
    """Applies the chosen activationMethod."""
    def __init__(self, activationMethod=activationMethods.RELU):
        self.method = activationMethod
    def forward(self, inputs):
        ...  # forward pass
    def backward(self, values):
        ...  # backward pass
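For concreteness, here is a minimal, self-contained sketch of what the forward/backward passes of the RELU case might look like. The class name ReLUFunction, the use of numpy, and the idea of caching the inputs for the backward pass are all assumptions, not a settled API:

import numpy as np

class ReLUFunction:
    """Sketch of a ReLU-style activation with forward and backward passes."""
    def forward(self, inputs):
        # keep the inputs around so backward() can compute the local gradient
        self.last_inputs = np.asarray(inputs)
        return np.maximum(0, self.last_inputs)

    def backward(self, upstream_gradient):
        # the derivative of ReLU is 1 where the input was positive, 0 elsewhere
        return np.asarray(upstream_gradient) * (self.last_inputs > 0)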
class DenseNN:
    """Dense neural network, for that sweet sweet efficiency."""
    def __init__(self, numInputs=someDefault, numOutputs=someOtherDefault, numLayers=default, numNodesInLayer=default):
        self.layers = ...  # some setup stuff
    def input(self, values):
        ...  # returns the NN output
class Layer:
    def __init__(self, numNodes, activationFunction):
        ...  # set up all those weights and biases
    def forward(self, values):
        ...  # forward pass
    def backward(self, values):
        ...  # backpropagation
    def optimize(self):
        ...  # will have to think more about how to do this
class Optimizer:
    """Optimizer can probably be common/shared, like the activation function."""
    def __init__(self):
        ...  # set up all those weights and biases
    def forward(self, values):
        ...  # forward pass
    def backward(self, values):
        ...  # backpropagation
    def optimize(self):
        ...  # will have to think more about how to do this
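As a rough sketch of what a shared optimizer could look like in the simplest case (plain gradient descent), something like the snippet below might work. The layer attribute names (weights, biases, weights_gradient, biases_gradient) and the assumption that they are numpy arrays are assumptions, not decided yet:

class GradientDescentOptimizer:
    """Sketch: applies a plain gradient-descent step to one layer."""
    def __init__(self, learning_rate=0.01):
        self.learning_rate = learning_rate

    def optimize(self, layer):
        # nudge each parameter against its gradient
        layer.weights -= self.learning_rate * layer.weights_gradient
        layer.biases -= self.learning_rate * layer.biases_gradient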
Visual representation of an unstructured neural network (maybe we could add visual construction as a web app sort of thing, for both this and the layered NN):
from EasyNN import NeuralNetwork as NN
from EasyNN.activation_functions import logistic

model = NN(
    # using a dict only to make the indexes clearer
    nodes = {
        # input nodes connect to nothing
        0: [],
        1: [],
        2: [],
        3: [],
        # hidden nodes
        4: [0, 1, 2],
        5: [1, 3, 4],
        6: [0, 2, 4, 5],
        7: [4, 5, 6],
        # output nodes do not appear in any connections
        8: [4, 5, 6, 7],
        9: [4, 6],
        10: [4, 5, 7],
        11: [4, 7],
    },
    # using a dict so only the last 4 nodes need to be specified
    activation_function = {
        # output is between 0 and 1
        8: logistic,
        9: logistic,
        10: logistic,
        11: logistic,
    },
)

model.train(  # or model.fit(...)
    # inputs
    x = [[0, 0, 0, 0],
         [0, 1, 0, 1],
         [1, 1, 1, 0],
         [0, 0, 1, 1]],
    # outputs
    y = [[0, 0, 0, 0],
         [1, 0, 1, 0],
         [1, 0, 0, 1],
         [1, 1, 1, 0]],
)

print(model([1, 1, 0, 0]))  # the output when given [1, 1, 0, 0]
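One open question from the class sketch above is how input/output nodes get designated. A minimal, self-contained sketch of how the nodes dict could be turned into node objects is shown below; SimpleNode and build_nodes are just stand-in names (assumptions), and the rule used is the one in the comments above: input nodes connect to nothing, output nodes appear in no other node's connections.

class SimpleNode:
    """Stand-in for the Node class sketched earlier."""
    def __init__(self):
        self.inputs = []
        self.value = 0.0

def build_nodes(connections):
    # create one node per index, then wire up the connections
    nodes = {index: SimpleNode() for index in connections}
    for index, input_indexes in connections.items():
        nodes[index].inputs = [nodes[i] for i in input_indexes]
    # input nodes connect to nothing; output nodes are referenced by no other node
    referenced = {i for input_indexes in connections.values() for i in input_indexes}
    input_nodes = [nodes[i] for i, input_indexes in connections.items() if not input_indexes]
    output_nodes = [nodes[i] for i in connections if i not in referenced]
    return nodes, input_nodes, output_nodes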
The layered (deep) version of the same example:

from EasyNN import DeepNN as DNN
from EasyNN.activation_functions import logistic

model = DNN(
    # 4 inputs, two hidden layers of 3 nodes, 4 outputs
    layers = [4, 3, 3, 4],
    # logistic activation on the last layer (index 3) so the output is between 0 and 1
    activation_function = {3: logistic},
)

model.train(  # or model.fit(...)
    # inputs
    x = [[0, 0, 0, 0],
         [0, 1, 0, 1],
         [1, 1, 1, 0],
         [0, 0, 1, 1]],
    # outputs
    y = [[0, 0, 0, 0],
         [1, 0, 1, 0],
         [1, 0, 0, 1],
         [1, 1, 1, 0]],
)

print(model([1, 1, 0, 0]))  # the output when given [1, 1, 0, 0]
from __future__ import annotations
from typing import Callable, Dict, Optional, Sequence, Tuple, Union

class NeuralNetwork:
    """General NN structure which allows greater flexibility (and neuroevolution?)."""
    nodes: Sequence[NeuralNode]

    def __init__(self, nodes: Sequence[int], activation: Dict[int, Callable[[float], float]] = {}):
        """Convert input integers to actual nodes, with activation functions assigned appropriately."""
        pass

    def __call__(self, input_values: Sequence[float], pad: float = 0) -> Sequence[float]:
        """
        Fill in node.value's with the input_values,
        fill in remaining nodes with the pad value,
        perform feed-forward propagation,
        and return the node.value's from the output nodes.
        """
        pass

    # Possibly add @properties for getting input/output nodes
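For concreteness, __call__ could read roughly like the sketch below. It assumes the input_nodes/output_nodes @properties mentioned above exist and that self.nodes is already stored in a valid feed-forward (dependency) order; both are assumptions, not decided behavior.

    # rough sketch of __call__, not the settled implementation
    def __call__(self, input_values, pad=0):
        # fill input nodes with the given values, everything else with the pad value
        for node in self.nodes:
            node.value = pad
        for node, value in zip(self.input_nodes, input_values):
            node.value = value
        # feed-forward propagation over the non-input nodes
        for node in self.nodes:
            if node not in self.input_nodes:
                node.feed_forward()
        return [node.value for node in self.output_nodes]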
class NeuralNode:
    """General Neural Node structure which allows greater flexibility (and neuroevolution?)."""
    value: float                                    # scalar
    bias: float                                     # scalar
    activation_function: Callable[[float], float]
    nodes: Sequence[NeuralNode]                     # vector
    weights: Sequence[float]                        # vector
    # for back_propagation
    value_change: Gradient                          # scalar
    bias_change: Gradient                           # scalar
    weights_change: Gradient                        # vector

    def __init__(
        self,
        bias: Optional[float] = None,
        activation_function: Callable[[float], float] = lambda x: x,  # no change
        nodes: Sequence[NeuralNode] = (),
    ):
        """
        Initialize with bias (random by default),
        activation function (no change by default),
        connected nodes (no connections by default),
        and random weights.
        Initialize changes to 0.
        """
        pass

    def __mul__(self, scalar: float) -> float:
        """
        Multiply self.value by the given scalar and return the result.
        Useful for easy numpy dot products.
        """
        return self.value * scalar

    def __rmul__(self, scalar: float) -> float:
        """
        Multiply self.value by the given scalar and return the result.
        Useful for easy numpy dot products.
        """
        return self.value * scalar

    def feed_forward(self):
        """Uses connected nodes to compute self.value."""
        pass

    def back_propagate(self):
        """
        Modify change attributes to more closely match value_change.
        Modify the connected nodes' value_change to more closely match value_change.
        Modify respective attributes based on their attr_change.
        """
        pass
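A rough sketch of how feed_forward could exploit the __mul__/__rmul__ trick with numpy is below. It assumes numpy is used and that self.nodes and self.weights are already populated; the exact body is an assumption, not the decided implementation.

    # rough sketch of feed_forward, not the settled implementation
    def feed_forward(self):
        import numpy as np
        # np.dot multiplies each weight by the connected node via __rmul__,
        # which returns node.value * weight, then sums the products
        weighted_sum = np.dot(self.weights, self.nodes) + self.bias
        self.value = self.activation_function(weighted_sum)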
class DeepNN:
    """Layered NN structure, which allows more efficient operations."""
    layers: Sequence[NeuralLayer]

    def __init__(self, layers: Sequence[int], activation: Dict[int, Callable[[float], float]] = {}):
        """Convert input integers to actual layers, with activation functions appropriately."""
        pass

    def __call__(self, input_values: Sequence[float], pad: float = 0) -> Sequence[float]:
        """
        Fill in the first layer with the input_values,
        fill in remaining nodes/layers with the pad value,
        perform feed-forward propagation,
        and return the last layer's values.
        """
        pass

    # Possibly add @properties for getting input/output nodes for consistency
class NeuralLayer:
    """General Neural Layer structure which allows more efficient, vectorized operations."""
    values: Sequence[float]                          # vector
    bias: Sequence[float]                            # vector
    activation_function: Callable[[float], float]
    nodes: Sequence[NeuralNode]                      # vector
    weights: Sequence[Sequence[float]]               # matrix
    # for back_propagation
    value_change: Gradient                           # vector
    bias_change: Gradient                            # vector
    weights_change: Gradient                         # matrix

    def __init__(
        self,
        bias: Optional[float] = None,
        activation_function: Callable[[float], float] = lambda x: x,  # no change
        previous_layer: Sequence[NeuralNode] = (),
    ):
        """
        Initialize with bias (random by default),
        activation function (no change by default),
        and previous_layer (empty layer by default)
        with random weights.
        Initialize changes to the 0 vector.
        """
        pass

    def __getitem__(self, index):
        """Returns the indexed value."""
        pass

    def __iter__(self):
        """Loop over values (slightly redundant)."""
        pass

    def __len__(self):
        """Returns the length of the values."""
        pass

    def feed_forward(self):
        """Uses the connected nodes to compute self.values."""
        pass

    def back_propagate(self):
        """
        Modify change attributes to more closely match value_change.
        Modify the previous layer's value_change to more closely match value_change.
        Modify respective attributes based on their attr_change.
        """
        pass
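The vectorized forward pass implied by the weights matrix and bias vector could look roughly like the sketch below; the use of numpy and the per-element application of activation_function are assumptions.

    # rough sketch of the vectorized feed_forward, not the settled implementation
    def feed_forward(self):
        import numpy as np
        # matrix-vector product over the previous layer's values, plus the bias vector
        previous_values = np.array([node.value for node in self.nodes])
        pre_activation = np.asarray(self.weights) @ previous_values + np.asarray(self.bias)
        self.values = [self.activation_function(v) for v in pre_activation]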
class Gradient:
    """Class for handling gradient calculations."""
    # may be a scalar, vector, or matrix
    derivative: Union[float, Sequence[float], Sequence[Sequence[float]]]
    iteration: int
    # hyperparameters and other attributes,
    # depending on the specific gradient descent method.

    def __init__(self, shape: Tuple[int, int]):
        """Initialize derivative based on the given shape and a default iteration."""
        # Default iteration of 0 or 1? Should it be an option?

    # Add @properties for modifying the derivative;
    # for example, hyperparameters may influence derivative getters and setters.
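To make the intent concrete, the kind of update a Gradient would encapsulate might look like the standalone sketch below for the plain gradient-descent case; the learning rate standing in for a hyperparameter attribute and the shapes used are assumptions.

import numpy as np

learning_rate = 0.01                 # would be a hyperparameter attribute on Gradient
weights = np.random.randn(3, 4)      # a layer's weight matrix
derivative = np.zeros((3, 4))        # Gradient.derivative, filled in by back-propagation
iteration = 0                        # Gradient.iteration

# one optimize() step: move the weights against the derivative and count the iteration
weights -= learning_rate * derivative
iteration += 1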
User Interaction / Output

Class structure:
- NN()
- DeepNN()
  - DeepNN - Layer class