DiogenesAnalytics / autoencoder

Python library implementing various autoencoders.
https://colab.research.google.com/github/DiogenesAnalytics/autoencoder/blob/master/notebooks/demo/anomaly_detection.ipynb
MIT License

Design: Initial Class Strategy #4

Closed: DiogenesAnalytics closed this issue 11 months ago

DiogenesAnalytics commented 11 months ago

Problem

Need to devise the initial approach to designing the class hierarchy for autoencoder.

Solution

Tentative pseudocode for what the class hierarchy might look like (including any methods/attributes):

"""Autoencoder base class."""
from abc import ABC
from abc import abstractmethod
from dataclasses import dataclass
from math import ceil
from typing import Any
from typing import Dict
from typing import Generator
from typing import Optional
from typing import Tuple
from typing import TypeAlias

import keras
from keras.layers import Layer

# custom types
DefaultParams: TypeAlias = Dict[str, Tuple[Layer, Dict[str, Any]]]

class BaseLayerParams(ABC):
    """Autoencoder layers hyperparameters configuration base class."""

    def __post_init__(self) -> None:
        """Store updated params and get sequence index slices."""
        # get updated parameters for instance
        self._instance_parameters = self._build_instance_params()

    def _get_layer_masks(self) -> Tuple[slice, slice]:
        """Calculate slice masks for selecting encode/decode subrange."""
        # measure halfway point
        halfway = len(self._instance_parameters) / 2

        # round up so that, for an odd layer count, the middle (bottleneck)
        # layer goes to the encoder: e.g. 3 layers -> encode [0:2], decode [2:]
        split = ceil(halfway)

        # get encode/decode masks (decode runs from the split to the end)
        encode, decode = slice(0, split), slice(split, None)

        # return masks
        return encode, decode

    def _get_encode_layers(self) -> Tuple[Tuple[Layer, Dict[str, Any]], ...]:
        """Get encoding layers subsequence."""
        # get encode mask
        encode, _ = self._get_layer_masks()

        # return subsequence
        return self._instance_parameters[encode]

    def _get_decode_layers(self) -> Tuple[Tuple[Layer, Dict[str, Any]], ...]:
        """Get decoding layer subsequence."""
        # get decode mask
        _, decode = self._get_layer_masks()

        # return subsequence
        return self._instance_parameters[decode]

    def _filter_layer_attrs(self) -> Generator[Tuple[str, Dict[str, Any]], None, None]:
        """Filter out layer attributes from class instance."""
        # get all attributes and values in class instance namespace
        for attr, value in self.__dict__.items():
            # make sure attribute name is in default parameters
            if attr in self.default_parameters:
                # generate tuple pairs
                yield attr, value

    def _update_layer_params(
        self,
    ) -> Generator[Tuple[Layer, Dict[str, Any]], None, None]:
        """Update default layer parameters values."""
        # get layer instance attrs and their values
        for attr, value in self._filter_layer_attrs():
            # unpack default parameters
            layer, params = self.default_parameters[attr]

            # check if none
            if value is not None:
                # merge instance values onto a copy of the defaults
                # (avoids mutating the shared class-level default dict)
                params = params | value

            # generate
            yield layer, params

    def _build_instance_params(self) -> Tuple[Tuple[Layer, Dict[str, Any]], ...]:
        """Create mutable sequence of layer params for instance."""
        return tuple(self._update_layer_params())

    @property
    @abstractmethod
    def default_parameters(self) -> DefaultParams:
        """Defines the required default layer parameters attribute."""
        pass

@dataclass
class BaseAutoencoder(ABC):
    """Autoencoder base class."""

    model_config: Optional[BaseLayerParams] = None

    def __post_init__(self) -> None:
        """Setup autoencoder model."""
        # check if default config used
        if self.model_config is None:
            # get default
            self.model_config = self._default_config

        # build model ...
        self.model = self._build_model()

    @property
    @abstractmethod
    def _default_config(self) -> BaseLayerParams:
        """Defines the default layer parameters attribute."""
        pass

    def _build_encoding_layer(self) -> keras.Model:
        """Assemble encoder from subsequence of encoding layers."""
        # get instance parameters for encoding layers
        assert self.model_config is not None
        inst_params = self.model_config._get_encode_layers()

        # generate layers from parameters
        encoding_layers = [layer(**params) for layer, params in inst_params]

        # create encoding model
        return keras.Sequential(encoding_layers)

    def _build_decoding_layer(self) -> keras.Model:
        """Assemble decoder from subsequence of decoding layers."""
        # get instance parameters for decoding layers
        assert self.model_config is not None
        inst_params = self.model_config._get_decode_layers()

        # generate layers from parameters
        decoding_layers = [layer(**params) for layer, params in inst_params]

        # create decoding model
        return keras.Sequential(decoding_layers)

    def _build_model(self) -> keras.Model:
        """Assemple autoencoder from encoder/decoder submodels."""
        # create encoding layer
        self._encode_layer = self._build_encoding_layer()

        # create decoding layer
        self._decode_layer = self._build_decoding_layer()

        # build model ...
        return keras.Sequential([self._encode_layer, self._decode_layer])

    def compile(self, **kwargs: Any) -> None:
        """Wrapper for Keras model.compile method."""
        self.model.compile(**kwargs)

    def fit(self, **kwargs: Any) -> None:
        """Wrapper for the Keras model.fit method."""
        self.model.fit(**kwargs)

    def predict(self, **kwargs: Any) -> Any:
        """Wrapper for the Keras model.predict method."""
        return self.model.predict(**kwargs)

    #
    # def encode(self):
    #     # encode data using trained encoding layer
    #     pass
    #
    # def decode(self):
    #     # decode data using trained decoding layer
    #     pass
    #
    # def visualize(self):
    #     # run custom visualization code
    #     pass
    #
    # def save(self, model_path: Union[str, Path]):
    #     pass
    #
    # def load(self, model_path: Union[str, Path]):
    #     pass


DiogenesAnalytics commented 11 months ago

Initial Steps

Need to identify the parts that all autoencoders have in common (i.e. the intersection of all the autoencoder implementations).

Commonalities

DiogenesAnalytics commented 11 months ago

Data Attributes

Continuing from the previous comment: what data attributes (i.e. class/instance variables) are common between the different autoencoder implementation strategies?

Tentative List


DiogenesAnalytics commented 11 months ago

ChatGPT: Building an Abstract Base Class for Autoencoders

Asked ChatGPT to build an Abstract Base Class for autoencoders.

The Question

"Can you build an abstract base class for autoencoders in Python 3?"

The Answer

"Certainly! An abstract base class (ABC) in Python can be created using the ABC module from the abc package. Here's an example of how you might create an abstract base class for autoencoders in Python 3:"

from abc import ABC, abstractmethod

class Autoencoder(ABC):
    def __init__(self):
        super().__init__()

    @abstractmethod
    def encode(self, input_data):
        """
        Encode input_data into a compressed representation.

        Parameters:
        - input_data: Input data to be encoded.

        Returns:
        Compressed representation of input_data.
        """
        pass

    @abstractmethod
    def decode(self, encoded_data):
        """
        Decode the encoded_data into its original form.

        Parameters:
        - encoded_data: Compressed representation to be decoded.

        Returns:
        Decoded version of the encoded_data.
        """
        pass

    @abstractmethod
    def train(self, input_data):
        """
        Train the autoencoder using the input_data.

        Parameters:
        - input_data: Training data for the autoencoder.
        """
        pass

    @abstractmethod
    def save(self, model_path):
        """
        Save the trained model to a file.

        Parameters:
        - model_path: Path where the model should be saved.
        """
        pass

    @abstractmethod
    def load(self, model_path):
        """
        Load a pre-trained model from a file.

        Parameters:
        - model_path: Path from where the model should be loaded.
        """
        pass

"In this example, Autoencoder is an abstract base class with abstract methods encode, decode, train, save, and load. You would then create concrete classes that inherit from this base class and implement these methods according to the specific autoencoder architecture you are working with. For example, a denoising autoencoder, variational autoencoder, etc., would be different concrete classes that implement these methods in their own way."

DiogenesAnalytics commented 11 months ago

Potential Obstacle: Hyperparameter Customization

Some problem points arise in generalizing the autoencoder abstract base class.

Ratio Between Input Layer vs. Encoding/Decoding Layer

While simpler autoencoders only use the encoding_dimension to build their internal model layers, more complex architectures (e.g. convolutional autoencoders) introduce additional hyperparameters (e.g. kernel_size, stride, etc.).^1 This makes the generalization process more challenging, but there are some strategies for dealing with this.^2^4
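
One such strategy: because the pseudocode above stores each layer as a (Layer, kwargs) pair, the extra convolutional hyperparameters can live in the same per-layer kwargs dicts without any change to the base class machinery. A sketch (the ConvLayerParams name and all layer shapes/values are assumptions; BaseLayerParams and DefaultParams are the definitions from the pseudocode above):

from dataclasses import dataclass
from typing import Any
from typing import ClassVar
from typing import Dict
from typing import Optional

from keras import layers

@dataclass
class ConvLayerParams(BaseLayerParams):
    """Hypothetical layer parameters for a convolutional autoencoder."""

    # kernel_size, strides, padding, etc. are just more kwargs entries
    default_parameters: ClassVar[DefaultParams] = {
        "l0": (layers.InputLayer, {"input_shape": (28, 28, 1)}),
        "l1": (
            layers.Conv2D,
            {"filters": 16, "kernel_size": 3, "strides": 2, "padding": "same", "activation": "relu"},
        ),
        "l2": (
            layers.Conv2DTranspose,
            {"filters": 1, "kernel_size": 3, "strides": 2, "padding": "same", "activation": "sigmoid"},
        ),
    }

    # per-instance overrides, one per layer
    l0: Optional[Dict[str, Any]] = None
    l1: Optional[Dict[str, Any]] = None
    l2: Optional[Dict[str, Any]] = None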

Significance of Hidden Layer Dimensions

"Regrettably, if the encoder and decoder are allowed too much capacity, this AE will fail to learn anything useful other than the ability to copy its input to its output ... In summary, using comparisons of the size of the hidden layer and the input layer, the basic AE structure can be divided into two categories: the narrow AE and wide AE (also known as under- complete and over-complete, respectively)." ^5

"Since in training, one may just get the identity operator for ๐ด and ๐ต, which keeps the achieved representation the same as the input, some additional regularization is required. The most common option is to make the dimension of the representation smaller than the input. This way, a ๐‘๐‘œ๐‘ก๐‘ก๐‘™๐‘’๐‘›๐‘’๐‘๐‘˜ is imposed. This option also directly serves the goal of getting a low dimensional representation of the data. This repre- sentation can be used for purposes such as data compression, feature extraction, etc." ^6


DiogenesAnalytics commented 11 months ago

Class Hierarchy: Fundamental Autoencoder Types

Upon studying the various autoencoder libraries and implementations in the above comment, it is becoming clearer that there are fundamental types that underlie ALL autoencoders.^1 These types will be explored/developed here.

Autoencoder Types

Here is the list:


DiogenesAnalytics commented 11 months ago

Minimal Autoencoder Class

An example of a concrete class that inherits from BaseAutoencoder.

Code

"""A simple autoencoder to get you started."""
from dataclasses import dataclass
from typing import Any
from typing import ClassVar
from typing import Dict
from typing import Optional

from keras import layers

from .base import BaseAutoencoder
from .base import BaseLayerParams
from .base import DefaultParams

@dataclass
class MinimalLayerParams(BaseLayerParams):
    """Layer parameters class for minimal autoencoder."""

    # setup default values
    default_parameters: ClassVar[DefaultParams] = {
        "l0": (layers.InputLayer, {"input_shape": (784,)}),
        "l1": (layers.Dense, {"units": 32, "activation": "relu"}),
        "l2": (layers.Dense, {"units": 784, "activation": "sigmoid"}),
    }

    # setup instance layer params
    l0: Optional[Dict[str, Any]] = None
    l1: Optional[Dict[str, Any]] = None
    l2: Optional[Dict[str, Any]] = None

class MinimalAutoencoder(BaseAutoencoder):
    """A simple autoencoder to get you started."""

    _default_config = MinimalLayerParams()

    def __init__(self, model_config: Optional[MinimalLayerParams] = None) -> None:
        """Overrided base constructor to set the layer params class used."""
        # call super
        super().__init__(model_config=model_config)
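
Usage might then look like the following sketch (the random x_train stands in for real data, and the l1 override is just an example of changing the bottleneck width):

import numpy as np

# dummy data standing in for a real (N, 784) dataset
x_train = np.random.rand(1024, 784).astype("float32")

# override the default bottleneck width (32 -> 64), keep the rest
config = MinimalLayerParams(l1={"units": 64})
autoencoder = MinimalAutoencoder(model_config=config)

# train to reconstruct the input
autoencoder.compile(optimizer="adam", loss="binary_crossentropy")
autoencoder.fit(x=x_train, y=x_train, epochs=10, batch_size=256)
reconstructions = autoencoder.predict(x=x_train)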
DiogenesAnalytics commented 11 months ago

Potential Obstacle: Keras Sequential Model

Some issues working with keras.Sequential ...

Problem

Need to design an intelligent way to take the layer/parameter combos and not only combine them into a keras.Model (i.e. probably using keras.Sequential), but also create specific "sub" models for the encoder and decoder.
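
One workable pattern, a sketch assuming standard Keras behavior (Sequential models are themselves layers, so they nest):

import keras
from keras import layers

# encoder and decoder built as standalone Sequential sub-models
encoder = keras.Sequential([
    layers.InputLayer(input_shape=(784,)),
    layers.Dense(32, activation="relu"),
])
decoder = keras.Sequential([
    layers.Dense(784, activation="sigmoid"),
])

# a Sequential model accepts other models as layers, so the full
# autoencoder is just the two sub-models chained together
autoencoder = keras.Sequential([encoder, decoder])

# after training the full model, encoder.predict(...) can be called on
# its own, since the sub-model shares weights with the parent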


DiogenesAnalytics commented 11 months ago

Implemented with: 338fb8ca44e40c914f9701cf74e4690338935e24