
FullyConnectedLayer
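
A FullyConnectedLayer is a dense layer: every neuron receives all of the layer's inputs, computes a weighted sum Wx + b, and passes the result through the activation function. Below is a minimal construction and forward-pass sketch; the import path and the column-vector input shape are assumptions, since only the class itself is documented on this page.

import numpy as np

# Import path is an assumption -- adjust to however ncxlib exposes the class.
from ncxlib import FullyConnectedLayer

# 16 neurons fed by 8 input features; the other arguments keep their
# documented defaults (ReLU activation, SGD optimizer, MeanSquaredError loss,
# HeNormal weights, Zero biases).
layer = FullyConnectedLayer(n_neurons=16, n_inputs=8, name="hidden_1")

x = np.random.rand(8, 1)                 # one sample as a column vector
out = layer.forward_propagation(x)       # expected shape: (16, 1)

The class source follows.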

from typing import Optional

import numpy as np

# Layer, Activation, ReLU, Optimizer, SGD, LossFunction, MeanSquaredError,
# Initializer, HeNormal, and Zero are ncxlib classes documented elsewhere in
# this API reference.

class FullyConnectedLayer(Layer):
    def __init__(
        self,
        n_neurons: Optional[int] = None,
        n_inputs: Optional[int] = None,
        activation: Optional[Activation] = ReLU,
        optimizer: Optional[Optimizer] = SGD,
        loss_fn: Optional[LossFunction] = MeanSquaredError,
        initializer: Optional[Initializer] = HeNormal(),
        weights_initializer: Optional[Initializer] = HeNormal(),
        bias_initializer: Optional[Initializer] = Zero(),
        name: Optional[str] = ""
    ):
        """
        A dense layer with n_neurons neurons, each connected to all n_inputs
        features; weights, biases, activation, optimizer, and loss default to
        HeNormal, Zero, ReLU, SGD, and MeanSquaredError respectively.
        """
        super().__init__(n_neurons, n_inputs, activation, optimizer, loss_fn, name=name)

    def forward_propagation(self, inputs: np.ndarray, no_save: Optional[bool] = False) -> np.ndarray:
        """
        inputs:
            An array of features (a numpy array).

        no_save:
            If True, skip caching the intermediate values needed for back-propagation.

        Returns:
            An array of the output values from each neuron in the layer.

        Function:
            Performs forward propagation by calculating the weighted sum for each
            neuron and applying the activation function.
        """
        # Set up self.W, self.b, and self.inputs from the incoming features.
        self.initialize_params(inputs)

        # Calculate the weighted sum: Wx + b.
        weighted_sum = np.dot(self.W, self.inputs) + self.b

        # Activate each neuron with the layer's activation function.
        activated = self.activation.apply(weighted_sum)

        # Cache the pre-activation and activation for back-propagation unless
        # the caller asked not to save them.
        if not no_save:
            self.z = weighted_sum
            self.activated = activated

        return activated
    
    def back_propagation(self, next_layer: Layer, learning_rate: float) -> None:
        """
        Propagates the loss gradient back through this layer using the next
        layer's old_W and gradients, stores dL/dz in self.gradients, and
        updates the weights and biases through the optimizer.
        """
        # Derivative of the activation at the cached weighted sum: da/dz.
        da_dz = self.activation.derivative(self.z)

        # Chain rule: dL/dz for this layer, from the next layer's pre-update
        # weights and its stored gradients.
        dl_dz = (next_layer.old_W.T @ next_layer.gradients) * da_dz

        # Keep a copy of the current weights for the previous layer's pass.
        self.old_W = self.W.copy()

        # Gradients with respect to the weights and biases.
        dz_dw = self.inputs.T
        dl_dw = dl_dz @ dz_dw
        dl_db = dl_dz

        # Store dL/dz for the previous layer's back-propagation step.
        self.gradients = dl_dz

        # Apply the parameter update through the optimizer.
        self.W, self.b = self.optimizer.apply(self.W, dl_dw, self.b, dl_db)
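
The shapes in forward_propagation and back_propagation are consistent with a single-sample, column-vector convention: W of shape (n_neurons, n_inputs), inputs as an (n_inputs, 1) column, and z, activated, and gradients as (n_neurons, 1) columns. The standalone NumPy sketch below mirrors those computations for a ReLU layer followed by one more layer; the toy sizes, the random data, and the plain SGD step at the end are illustrative assumptions, not part of ncxlib (the real layer delegates the update to self.optimizer.apply).

import numpy as np

# Toy sizes and data -- assumptions for illustration only.
n_inputs, n_neurons, n_next = 4, 3, 2
rng = np.random.default_rng(0)

W = rng.normal(size=(n_neurons, n_inputs))        # this layer's self.W
b = np.zeros((n_neurons, 1))                      # this layer's self.b
W_next = rng.normal(size=(n_next, n_neurons))     # next_layer.old_W

# ---- forward_propagation ----
x = rng.normal(size=(n_inputs, 1))                # self.inputs (one sample, column vector)
z = W @ x + b                                     # weighted sum, cached as self.z
a = np.maximum(0.0, z)                            # ReLU activation, cached as self.activated

# ---- back_propagation ----
grad_next = rng.normal(size=(n_next, 1))          # next_layer.gradients (its dL/dz)
da_dz = (z > 0).astype(float)                     # ReLU derivative at the cached z
dl_dz = (W_next.T @ grad_next) * da_dz            # this layer's dL/dz (self.gradients)
dl_dw = dl_dz @ x.T                               # dL/dW, same shape as W
dl_db = dl_dz                                     # dL/db

# Stand-in for self.optimizer.apply: a plain SGD step.
lr = 0.01
W = W - lr * dl_dw
b = b - lr * dl_db

print(a.shape, dl_dw.shape)                       # (3, 1) (3, 4)

The only difference from the class above is that the optimizer update is written out explicitly here.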