TheAlgorithms-Python
238 lines · 6.7 KB
"""
Perceptron
w = w + N * (d(k) - y) * x(k)

Using a perceptron network for oil analysis: by measuring 3 parameters
that represent chemical characteristics, we can classify the oil as p1 or p2.
p1 = -1
p2 = 1
"""
import random
11
12
class Perceptron:
    """Single-layer perceptron classifying oil samples into p1 (-1) or p2 (1).

    Weights are trained with the classic perceptron rule:
        w = w + N * (d(k) - y) * x(k)
    """

    def __init__(
        self,
        sample: list[list[float]],
        target: list[int],
        learning_rate: float = 0.01,
        epoch_number: int = 1000,
        bias: float = -1,
    ) -> None:
        """
        Initializes a Perceptron network for oil analysis
        :param sample: sample dataset of 3 parameters with shape [30,3]
        :param target: variable for classification with two possible states -1 or 1
        :param learning_rate: learning rate used in optimizing.
        :param epoch_number: maximum number of epochs to train the network for.
        :param bias: bias value for the network.

        >>> p = Perceptron([], (0, 1, 2))
        Traceback (most recent call last):
        ...
        ValueError: Sample data can not be empty
        >>> p = Perceptron(([0], 1, 2), [])
        Traceback (most recent call last):
        ...
        ValueError: Target data can not be empty
        >>> p = Perceptron(([0], 1, 2), (0, 1))
        Traceback (most recent call last):
        ...
        ValueError: Sample data and Target data do not have matching lengths
        """
        self.sample = sample
        if len(self.sample) == 0:
            raise ValueError("Sample data can not be empty")
        self.target = target
        if len(self.target) == 0:
            raise ValueError("Target data can not be empty")
        if len(self.sample) != len(self.target):
            raise ValueError("Sample data and Target data do not have matching lengths")
        self.learning_rate = learning_rate
        self.epoch_number = epoch_number
        self.bias = bias
        self.number_sample = len(sample)
        self.col_sample = len(sample[0])  # number of columns in dataset
        self.weight: list = []

    def training(self) -> None:
        """
        Trains perceptron for epochs <= given number of epochs
        :return: None
        >>> data = [[2.0149, 0.6192, 10.9263]]
        >>> targets = [-1]
        >>> perceptron = Perceptron(data,targets)
        >>> perceptron.training() # doctest: +ELLIPSIS
        ('\\nEpoch:\\n', ...)
        ...
        """
        # NOTE(review): each call inserts the bias column into ``self.sample``
        # and appends fresh random weights, so ``training`` is single-use.
        for sample in self.sample:
            sample.insert(0, self.bias)

        # one random weight per feature, plus the bias weight at index 0
        for _ in range(self.col_sample):
            self.weight.append(random.random())

        self.weight.insert(0, self.bias)

        epoch_count = 0

        while True:
            has_misclassified = False
            for i in range(self.number_sample):
                # weighted sum of the bias-augmented sample
                u = 0
                for j in range(self.col_sample + 1):
                    u = u + self.weight[j] * self.sample[i][j]
                y = self.sign(u)
                if y != self.target[i]:
                    # perceptron update rule: w = w + N * (d - y) * x
                    for j in range(self.col_sample + 1):
                        self.weight[j] = (
                            self.weight[j]
                            + self.learning_rate
                            * (self.target[i] - y)
                            * self.sample[i][j]
                        )
                    has_misclassified = True
            epoch_count = epoch_count + 1
            # Stop on convergence (no misclassifications) or when the epoch
            # budget is exhausted; previously ``epoch_number`` was unused and
            # non-separable data caused an infinite loop.
            if not has_misclassified or epoch_count >= self.epoch_number:
                print(("\nEpoch:\n", epoch_count))
                print("------------------------\n")
                break

    def sort(self, sample: list[float]) -> None:
        """
        Classify one example row as P1 or P2 and print the result.

        Note: ``sample`` is mutated in place (the bias is inserted at index 0).
        :param sample: example row to classify as P1 or P2
        :return: None
        >>> data = [[2.0149, 0.6192, 10.9263]]
        >>> targets = [-1]
        >>> perceptron = Perceptron(data,targets)
        >>> perceptron.training() # doctest: +ELLIPSIS
        ('\\nEpoch:\\n', ...)
        ...
        >>> perceptron.sort([-0.6508, 0.1097, 4.0009]) # doctest: +ELLIPSIS
        ('Sample: ', ...)
        classification: P...
        """
        if len(self.sample) == 0:
            raise ValueError("Sample data can not be empty")
        sample.insert(0, self.bias)
        u = 0
        for i in range(self.col_sample + 1):
            u = u + self.weight[i] * sample[i]

        y = self.sign(u)

        # -1 maps to class P1, +1 maps to class P2
        if y == -1:
            print(("Sample: ", sample))
            print("classification: P1")
        else:
            print(("Sample: ", sample))
            print("classification: P2")

    def sign(self, u: float) -> int:
        """
        threshold function for classification
        :param u: input number
        :return: 1 if the input is greater than or equal to 0, otherwise -1
        >>> data = [[0],[-0.5],[0.5]]
        >>> targets = [1,-1,1]
        >>> perceptron = Perceptron(data,targets)
        >>> perceptron.sign(0)
        1
        >>> perceptron.sign(-0.5)
        -1
        >>> perceptron.sign(0.5)
        1
        """
        return 1 if u >= 0 else -1
150
151
# Thirty measured oil samples; each row holds the three chemical
# characteristics the network uses as inputs.
samples = [
    [-0.6508, 0.1097, 4.0009],
    [-1.4492, 0.8896, 4.4005],
    [2.0850, 0.6876, 12.0710],
    [0.2626, 1.1476, 7.7985],
    [0.6418, 1.0234, 7.0427],
    [0.2569, 0.6730, 8.3265],
    [1.1155, 0.6043, 7.4446],
    [0.0914, 0.3399, 7.0677],
    [0.0121, 0.5256, 4.6316],
    [-0.0429, 0.4660, 5.4323],
    [0.4340, 0.6870, 8.2287],
    [0.2735, 1.0287, 7.1934],
    [0.4839, 0.4851, 7.4850],
    [0.4089, -0.1267, 5.5019],
    [1.4391, 0.1614, 8.5843],
    [-0.9115, -0.1973, 2.1962],
    [0.3654, 1.0475, 7.4858],
    [0.2144, 0.7515, 7.1699],
    [0.2013, 1.0014, 6.5489],
    [0.6483, 0.2183, 5.8991],
    [-0.1147, 0.2242, 7.2435],
    [-0.7970, 0.8795, 3.8762],
    [-1.0625, 0.6366, 2.4707],
    [0.5307, 0.1285, 5.6883],
    [-1.2200, 0.7777, 1.7252],
    [0.3957, 0.1076, 5.6623],
    [-0.1013, 0.5989, 7.1812],
    [2.4482, 0.9455, 11.2095],
    [2.0149, 0.6192, 10.9263],
    [0.2012, 0.2611, 5.4631],
]
184
# Expected class for each row of ``samples``: -1 => p1, 1 => p2.
target = [
    -1, -1, -1, 1, 1, -1, 1, -1, 1, 1,
    -1, 1, -1, -1, -1, -1, 1, 1, 1, 1,
    -1, 1, 1, 1, 1, -1, -1, 1, -1, 1,
]
217
218
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    network = Perceptron(
        sample=samples, target=target, learning_rate=0.01, epoch_number=1000, bias=-1
    )
    network.training()
    print("Finished training perceptron")
    print("Enter values to predict or q to exit")
    # Read one full sample per outer iteration; "q" quits the whole program
    # (previously it only broke the inner loop, then classified a partial
    # sample and prompted again forever).
    quitting = False
    while not quitting:
        sample: list = []
        for _ in range(len(samples[0])):
            user_input = input("value: ").strip()
            if user_input == "q":
                quitting = True
                break
            sample.append(float(user_input))
        if not quitting:
            network.sort(sample)
239