Skip to content

Commit 6825862

Browse files
Fix type inconsistencies in gradient_descent.py and resolve mypy errors
Refactor gradient descent code to use float initialization for parameter vectors.
1 parent dfe35fb commit 6825862

File tree

1 file changed

+5
-3
lines changed

1 file changed

+5
-3
lines changed

machine_learning/gradient_descent.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,10 @@
55

66
from __future__ import annotations
77

8+
from collections.abc import Sequence
9+
from typing import Literal
10+
811
import numpy as np
9-
from typing import Literal, Sequence
1012

1113
# List of input, output pairs
1214
train_data: tuple[tuple[tuple[int, ...], int], ...] = (
@@ -48,7 +50,7 @@ def _hypothesis_value(data_input_tuple: Sequence[int]) -> float:
4850
It is not explicitly mentioned in the input data, but ML hypothesis functions use it.
4951
So, we have to take care of it separately. Line 36 takes care of it.
5052
"""
51-
hyp_val = 0
53+
hyp_val = 0.0
5254
for i in range(len(parameter_vector) - 1):
5355
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
5456
hyp_val += parameter_vector[0]
@@ -130,7 +132,7 @@ def run_gradient_descent() -> None:
130132
j = 0
131133
while True:
132134
j += 1
133-
temp_parameter_vector = [0, 0, 0, 0]
135+
temp_parameter_vector = [0.0] * len(parameter_vector)
134136
for i in range(len(parameter_vector)):
135137
cost_derivative = get_cost_derivative(i - 1)
136138
temp_parameter_vector[i] = (

0 commit comments

Comments
 (0)