TheAlgorithms-Python

Форк
0
/
perceptron.py.DISABLED 
238 строк · 6.7 Кб
1
"""
Perceptron
w = w + N * (d(k) - y) * x(k)

Using perceptron network for oil analysis, with measuring of 3 parameters
that represent chemical characteristics we can classify the oil, in p1 or p2
p1 = -1
p2 = 1
"""
import random


class Perceptron:
    def __init__(
        self,
        sample: list[list[float]],
        target: list[int],
        learning_rate: float = 0.01,
        epoch_number: int = 1000,
        bias: float = -1,
    ) -> None:
        """
        Initializes a Perceptron network for oil analysis

        :param sample: sample dataset of 3 parameters with shape [30,3]
        :param target: variable for classification with two possible states -1 or 1
        :param learning_rate: learning rate used in optimizing.
        :param epoch_number: maximum number of epochs to train the network for.
        :param bias: bias value for the network.

        >>> p = Perceptron([], (0, 1, 2))
        Traceback (most recent call last):
            ...
        ValueError: Sample data can not be empty
        >>> p = Perceptron(([0], 1, 2), [])
        Traceback (most recent call last):
            ...
        ValueError: Target data can not be empty
        >>> p = Perceptron(([0], 1, 2), (0, 1))
        Traceback (most recent call last):
            ...
        ValueError: Sample data and Target data do not have matching lengths
        """
        self.sample = sample
        if len(self.sample) == 0:
            raise ValueError("Sample data can not be empty")
        self.target = target
        if len(self.target) == 0:
            raise ValueError("Target data can not be empty")
        if len(self.sample) != len(self.target):
            raise ValueError("Sample data and Target data do not have matching lengths")
        self.learning_rate = learning_rate
        self.epoch_number = epoch_number
        self.bias = bias
        self.number_sample = len(sample)
        self.col_sample = len(sample[0])  # number of columns in dataset
        self.weight: list[float] = []

    def training(self) -> None:
        """
        Trains perceptron for epochs <= given number of epochs

        NOTE: mutates ``self.sample`` in place (prepends the bias input to each
        row) and appends to ``self.weight``, so it must only be called once per
        instance.

        :return: None
        >>> data = [[2.0149, 0.6192, 10.9263]]
        >>> targets = [-1]
        >>> perceptron = Perceptron(data,targets)
        >>> perceptron.training() # doctest: +ELLIPSIS
        ('\\nEpoch:\\n', ...)
        ...
        """
        # Prepend the bias input to every sample row so the bias weight is
        # learned like any other weight.
        for sample in self.sample:
            sample.insert(0, self.bias)

        # Random initial weights, one per feature column, plus the bias weight.
        for _ in range(self.col_sample):
            self.weight.append(random.random())

        self.weight.insert(0, self.bias)

        epoch_count = 0

        while True:
            has_misclassified = False
            for i in range(self.number_sample):
                # u = w . x  (dot product, including the bias term at index 0)
                u = 0
                for j in range(self.col_sample + 1):
                    u = u + self.weight[j] * self.sample[i][j]
                y = self.sign(u)
                if y != self.target[i]:
                    # Perceptron learning rule: w = w + N * (d(k) - y) * x(k)
                    for j in range(self.col_sample + 1):
                        self.weight[j] = (
                            self.weight[j]
                            + self.learning_rate
                            * (self.target[i] - y)
                            * self.sample[i][j]
                        )
                    has_misclassified = True
            epoch_count = epoch_count + 1
            if not has_misclassified:
                print(("\nEpoch:\n", epoch_count))
                print("------------------------\n")
                break
            # BUG FIX: ``epoch_number`` was accepted but never used, so training
            # looped forever on non-separable data. Stop once the cap is hit.
            if epoch_count >= self.epoch_number:
                break

    def sort(self, sample: list[float]) -> None:
        """
        Classifies an example row as P1 or P2 and prints the result.

        NOTE: mutates ``sample`` in place by prepending the bias input.

        :param sample: example row to classify as P1 or P2
        :return: None
        >>> data = [[2.0149, 0.6192, 10.9263]]
        >>> targets = [-1]
        >>> perceptron = Perceptron(data,targets)
        >>> perceptron.training() # doctest: +ELLIPSIS
        ('\\nEpoch:\\n', ...)
        ...
        >>> perceptron.sort([-0.6508, 0.1097, 4.0009]) # doctest: +ELLIPSIS
        ('Sample: ', ...)
        classification: P...
        """
        # Defensive check; __init__ already guarantees a non-empty sample set.
        if len(self.sample) == 0:
            raise ValueError("Sample data can not be empty")
        sample.insert(0, self.bias)
        u = 0
        for i in range(self.col_sample + 1):
            u = u + self.weight[i] * sample[i]

        y = self.sign(u)

        if y == -1:
            print(("Sample: ", sample))
            print("classification: P1")
        else:
            print(("Sample: ", sample))
            print("classification: P2")

    def sign(self, u: float) -> int:
        """
        threshold function for classification
        :param u: input number
        :return: 1 if the input is greater than or equal to 0, otherwise -1
        >>> data = [[0],[-0.5],[0.5]]
        >>> targets = [1,-1,1]
        >>> perceptron = Perceptron(data,targets)
        >>> perceptron.sign(0)
        1
        >>> perceptron.sign(-0.5)
        -1
        >>> perceptron.sign(0.5)
        1
        """
        return 1 if u >= 0 else -1


# 30 oil samples, 3 chemical-characteristic measurements each (shape [30, 3]).
samples = [
    [-0.6508, 0.1097, 4.0009],
    [-1.4492, 0.8896, 4.4005],
    [2.0850, 0.6876, 12.0710],
    [0.2626, 1.1476, 7.7985],
    [0.6418, 1.0234, 7.0427],
    [0.2569, 0.6730, 8.3265],
    [1.1155, 0.6043, 7.4446],
    [0.0914, 0.3399, 7.0677],
    [0.0121, 0.5256, 4.6316],
    [-0.0429, 0.4660, 5.4323],
    [0.4340, 0.6870, 8.2287],
    [0.2735, 1.0287, 7.1934],
    [0.4839, 0.4851, 7.4850],
    [0.4089, -0.1267, 5.5019],
    [1.4391, 0.1614, 8.5843],
    [-0.9115, -0.1973, 2.1962],
    [0.3654, 1.0475, 7.4858],
    [0.2144, 0.7515, 7.1699],
    [0.2013, 1.0014, 6.5489],
    [0.6483, 0.2183, 5.8991],
    [-0.1147, 0.2242, 7.2435],
    [-0.7970, 0.8795, 3.8762],
    [-1.0625, 0.6366, 2.4707],
    [0.5307, 0.1285, 5.6883],
    [-1.2200, 0.7777, 1.7252],
    [0.3957, 0.1076, 5.6623],
    [-0.1013, 0.5989, 7.1812],
    [2.4482, 0.9455, 11.2095],
    [2.0149, 0.6192, 10.9263],
    [0.2012, 0.2611, 5.4631],
]

# Class label (-1 = P1, 1 = P2) for each row of ``samples``, in order.
target = [
    -1,
    -1,
    -1,
    1,
    1,
    -1,
    1,
    -1,
    1,
    1,
    -1,
    1,
    -1,
    -1,
    -1,
    -1,
    1,
    1,
    1,
    1,
    -1,
    1,
    1,
    1,
    1,
    -1,
    -1,
    1,
    -1,
    1,
]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    network = Perceptron(
        sample=samples, target=target, learning_rate=0.01, epoch_number=1000, bias=-1
    )
    network.training()
    print("Finished training perceptron")
    print("Enter values to predict or q to exit")
    # BUG FIX: previously "q" only broke out of the inner for loop; the
    # incomplete sample was still passed to network.sort() and the outer
    # while loop never terminated, so "q" could not actually quit.
    quitting = False
    while not quitting:
        sample: list = []
        for i in range(len(samples[0])):
            user_input = input("value: ").strip()
            if user_input == "q":
                quitting = True
                break
            observation = float(user_input)
            sample.insert(i, observation)
        if quitting:
            break
        network.sort(sample)

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.