@Pigmon 2017-09-17T20:24:48.000000Z

Mathematical Foundations of Machine Learning, Day 5 P1: "A First Look at Artificial Neural Networks"

Python


The Neuron class

```python
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 09 17:25:53 2017
@author: Yuan Sheng
"""
import numpy as np
import random as rnd


def sigmoid(x):
    return 1. / (1. + np.exp(-x))


class Neuron:
    "Base class for neurons."
    # layer: which layer the neuron is in; the input layer is 0
    # index: position within its layer; the topmost neuron is 0
    # gid:   global index, starting at 0
    # The network takes input on the left and produces output on the right.
    # Neurons are numbered from the input layer onward, top-left to bottom-right:
    #
    #           2
    #    ->0         5     7->
    #           3
    #    ->1         6     8->
    #           4
    layer, index, gid = 0, 0, 0
    # output: activation value
    # error:  error term (delta)
    output, error = 0.0, 0.0

    def __init__(self, _layer, _index, _global_index):
        self.layer, self.index, self.gid = _layer, _index, _global_index
        self.bias = rnd.uniform(-1.0, 1.0)

    def sigmoid_derive(self):
        # sigma'(x) = sigma(x) * (1 - sigma(x)), so the cached output suffices
        return self.output * (1.0 - self.output)

    def count_out(self, _in_w, _in_vec):
        "Compute the activation value."
        if len(_in_w) != len(_in_vec):
            return 0.0
        self.output = sigmoid(self.bias + np.dot(_in_w, _in_vec))
        return self.output

    def count_error(self, _out_w, _next_layer_error):
        "Compute the error term."
        if len(_out_w) != len(_next_layer_error):
            return 0.0
        self.error = self.sigmoid_derive() * np.dot(_out_w, _next_layer_error)
        return self.error

    def update_bias(self, _eta):
        "Adjust the bias."
        self.bias -= _eta * self.error

    def debug(self):
        print('%d: <%d, %d> b=%f' % (self.gid, self.layer, self.index, self.bias))


class OutputNeuron(Neuron):
    "Output-layer neuron."

    def __init__(self, _layer, _index, _global_index):
        Neuron.__init__(self, _layer, _index, _global_index)

    def count_error(self, _label):
        self.error = self.sigmoid_derive() * (self.output - _label)
        return self.error
```
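A quick note on the math: since σ′(x) = σ(x)(1 − σ(x)), a neuron only needs its cached `output` to compute the derivative, which is why `sigmoid_derive` takes no input. The listing uses the usual backprop error terms: δ = y(1 − y)(y − t) for an output neuron and δ_j = o_j(1 − o_j) Σ_k w_jk δ_k for a hidden one, with the bias moving by −η·δ. The sanity check below is a minimal sketch, assuming the listing above is saved as `neuron.py` (the module name the BPNN listing imports from); all the numbers are arbitrary illustration values, not from the original post.

```python
# Minimal sanity check of the Neuron classes above; all numbers are arbitrary.
from neuron import OutputNeuron

n = OutputNeuron(0, 0, 0)                   # layer 0, index 0, gid 0
out = n.count_out([0.5, -0.3], [1.0, 0.0])  # out = sigmoid(bias + w . x)
err = n.count_error(1.0)                    # delta = out * (1 - out) * (out - label)
n.update_bias(0.2)                          # bias -= eta * delta
print(out, err, n.bias)
```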

The BPNN class

```python
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 09 20:50:49 2017
@author: Yuan Sheng
To keep the code easy to read, no matrix multiplication is used; the
per-neuron formulas from the slides are applied directly.
"""
import numpy as np
import random as rnd
from neuron import Neuron, OutputNeuron


class BPNN:
    "Feed-forward neural network trained with backpropagation."
    # @input_vec_len:      dimension of the input layer
    # @output_vec_len:     dimension of the output layer
    # @hidden_layer_count: number of hidden layers
    # @eta:                learning rate (step size)
    # @threshold:          threshold on the global mean squared error
    input_vec_len, output_vec_len, hidden_layer_count, eta, threshold = 2, 1, 0, 0.2, 0.1
    # ----------------- outputs -----------------
    _correct_rate, _output_vec = 0.0, []

    # -------------------------------------------------------------------------
    def __init__(self, _in_len, _out_len, _hiddens_len, _learning_rate):
        self.input_vec_len, self.output_vec_len, self.hidden_layer_count, \
            self.hidden_layers_len, self.eta = \
            _in_len, _out_len, len(_hiddens_len), _hiddens_len, _learning_rate
        # @hidden_layers_len: dimension of each hidden layer
        # @hidden_neurons:    2-D list of hidden neurons
        # @output_neurons:    1-D list of output neurons
        # @weights:           3-D list of weights
        # @gid_dict:          maps a global index (gid) to a (layer, index) pair
        # These containers are created here rather than as class attributes,
        # so each instance gets its own lists instead of shared mutable state.
        self.hidden_neurons, self.output_neurons, self.weights = [], [], []
        self.gid_dict = {}
        # Create the hidden-layer neurons
        cnter = _in_len
        for i in range(0, len(_hiddens_len)):
            cnt, layer_list = _hiddens_len[i], []
            for j in range(0, cnt):
                layer_list.append(Neuron(i, j, cnter))
                self.gid_dict[cnter] = (i, j)
                cnter += 1
            self.hidden_neurons.append(layer_list)
        # Create the output-layer neurons
        for i in range(0, _out_len):
            self.output_neurons.append(OutputNeuron(self.hidden_layer_count, i, cnter))
            self.gid_dict[cnter] = (self.hidden_layer_count, i)
            cnter += 1
        # Create the weights. For example, with an input layer of 2,
        # hidden layers of (3, 2) and an output layer of 2:
        #
        #           2
        #    ->0         5     7->
        #           3
        #    ->1         6     8->
        #           4
        #
        # the weight array looks like this:
        # [
        #   [ [w_0_2, w_1_2], [w_0_3, w_1_3], [w_0_4, w_1_4] ],
        #   [ [w_2_5, w_3_5, w_4_5], [w_2_6, w_3_6, w_4_6] ],
        #   [ [w_5_7, w_6_7], [w_5_8, w_6_8] ]
        # ]
        #
        # For debugging, pair.append(self.debug_weight_index(cnt_list, i, j, k))
        # can be used inside the innermost loop.
        cnt_list = np.append(np.array([_in_len]), np.append(_hiddens_len, _out_len))
        for i in range(0, len(cnt_list) - 1):
            layer1_cnt, layer2_cnt, layer_weight = cnt_list[i], cnt_list[i + 1], []
            for j in range(0, layer2_cnt):
                pair = []
                for k in range(0, layer1_cnt):
                    pair.append(rnd.uniform(-1.0, 1.0))
                    #pair.append(self.debug_weight_index(cnt_list, i, j, k))
                layer_weight.append(pair)
            self.weights.append(layer_weight)

    # -------------------------------------------------------------------------
    def input_weights_of(self, layer, index):
        """
        Return the list of input weights (on the left side) of neuron `index`
        in layer `layer`. Layer 0 is the first hidden layer; index 0 is the
        topmost neuron.
        """
        return self.weights[layer][index]

    # -------------------------------------------------------------------------
    def input_weights_of_gid(self, _gid):
        "Look up the input-weight list by global index."
        return self.input_weights_of(self.gid_dict[_gid][0], self.gid_dict[_gid][1])

    # -------------------------------------------------------------------------
    def output_weights_of(self, layer, index):
        """
        Return the list of output weights (on the right side) of neuron `index`
        in layer `layer`. Layer 0 is the first hidden layer; index 0 is the
        topmost neuron.
        """
        # Output-layer neurons have no outgoing weights
        if layer == self.hidden_layer_count:
            return []
        next_layer_weights, ret = self.weights[layer + 1], []
        for group in next_layer_weights:
            ret.append(group[index])
        return ret

    # -------------------------------------------------------------------------
    def output_weights_of_gid(self, _gid):
        "Look up the output-weight list by global index."
        return self.output_weights_of(self.gid_dict[_gid][0], self.gid_dict[_gid][1])

    # -------------------------------------------------------------------------
    def pre_layer_outs(self, layer, input_vec):
        """
        Return the outputs of the neurons in the layer preceding `layer`.
        Layer 0 is the first hidden layer, so its inputs are input_vec itself.
        """
        if layer == 0:
            return input_vec
        else:
            pre_layer_neurons, ret = self.hidden_neurons[layer - 1], []
            for neuron in pre_layer_neurons:
                ret.append(neuron.output)
            return ret

    # -------------------------------------------------------------------------
    def next_layer_errors(self, layer):
        "Return the error terms of the neurons in layer `layer` + 1."
        # Output-layer neurons have no next layer
        if layer >= self.hidden_layer_count:
            return []
        next_layer_neurons, ret = [], []
        if layer == self.hidden_layer_count - 1:
            next_layer_neurons = self.output_neurons
        else:
            next_layer_neurons = self.hidden_neurons[layer + 1]
        for neuron in next_layer_neurons:
            ret.append(neuron.error)
        return ret

    # -------------------------------------------------------------------------
    def forward(self, input_vec):
        "Forward pass: propagate the activations."
        assert len(input_vec) == self.input_vec_len
        self._output_vec = []
        for i in range(0, len(self.hidden_neurons)):
            hidden_layer = self.hidden_neurons[i]
            for j in range(0, len(hidden_layer)):
                neuron, input_values, input_weights = \
                    hidden_layer[j], self.pre_layer_outs(i, input_vec), self.input_weights_of(i, j)
                neuron.count_out(input_weights, input_values)
        for j in range(0, len(self.output_neurons)):
            neuron, input_values, input_weights = self.output_neurons[j], \
                self.pre_layer_outs(self.hidden_layer_count, input_vec), \
                self.input_weights_of(self.hidden_layer_count, j)
            neuron.count_out(input_weights, input_values)
            self._output_vec.append(neuron.output)
        return self._output_vec

    # -------------------------------------------------------------------------
    def backward(self, _result_vec, _label_vec):
        "Backward pass: propagate the error terms."
        assert len(_result_vec) == len(_label_vec)
        # Error terms of the output layer
        for j in range(0, len(self.output_neurons)):
            self.output_neurons[j].count_error(_label_vec[j])
        # Error terms of the hidden layers, from back to front
        arr, length = self.hidden_neurons[::-1], len(self.hidden_neurons)
        for i in range(0, length):
            hidden_layer, layer_idx = arr[i], length - i - 1
            for j in range(0, len(hidden_layer)):
                neuron, output_weights, next_layer_errors = \
                    hidden_layer[j], self.output_weights_of(layer_idx, j), self.next_layer_errors(layer_idx)
                neuron.count_error(output_weights, next_layer_errors)

    # -------------------------------------------------------------------------
    def update_param(self, _input_vec):
        """Adjust the weights and biases. This could be folded into backward();
        it is kept separate for readability."""
        # Update the input weights and biases of the output layer
        for j in range(0, len(self.output_neurons)):
            neuron, input_values, input_weights = self.output_neurons[j], \
                self.pre_layer_outs(self.hidden_layer_count, _input_vec), \
                self.input_weights_of(self.hidden_layer_count, j)
            neuron.update_bias(self.eta)
            for i in range(0, len(input_weights)):
                input_weights[i] -= self.eta * neuron.error * input_values[i]
            # Write back into self.weights
            self.weights[self.hidden_layer_count][j] = input_weights
        # Update the input weights and biases of the hidden layers
        for i in range(0, len(self.hidden_neurons)):
            hidden_layer = self.hidden_neurons[i]
            for j in range(0, len(hidden_layer)):
                neuron, input_values, input_weights = \
                    hidden_layer[j], self.pre_layer_outs(i, _input_vec), self.input_weights_of(i, j)
                neuron.update_bias(self.eta)
                for k in range(0, len(input_weights)):
                    input_weights[k] -= self.eta * neuron.error * input_values[k]
                # Write back into self.weights
                self.weights[i][j] = input_weights

    # -------------------------------------------------------------------------
    @staticmethod
    def correct_rate(_vec1, _vec2):
        "Accuracy: fraction of outputs within 0.5 of the label."
        assert len(_vec1) == len(_vec2)
        counter = 0
        for i in range(0, len(_vec1)):
            if abs(_vec1[i] - _vec2[i]) < 0.5:
                counter += 1
        return float(counter) / float(len(_vec1))

    # -------------------------------------------------------------------------
    def total_error_var(self, _train_set):
        "Mean squared error over the training set."
        total = 0
        for smpl in _train_set:
            total += (self.predict_labeled([smpl])[0] - smpl[1]) ** 2
        return total / len(_train_set)

    # -------------------------------------------------------------------------
    def predict_labeled(self, _test_set):
        "Each element of _test_set must be formatted as [[x1, x2, ...], label]."
        assert len(_test_set) > 0 and len(_test_set[0]) == 2
        ret_vec, label_vec = [], []
        for sample in _test_set:
            ret_vec.append(self.forward(sample[0])[0])
            label_vec.append(sample[1])
        self._correct_rate = BPNN.correct_rate(ret_vec, label_vec)
        return ret_vec

    # -------------------------------------------------------------------------
    @staticmethod
    def sigsign(x):
        "Threshold a sigmoid output at 0.5."
        if x >= 0.5:
            return 1
        else:
            return 0

    # -------------------------------------------------------------------------
    def predict(self, _attr_set):
        "Test set containing features only: [[x1, x2, ...], [x1, x2, ...], ...]."
        ret_vec = []
        for point in _attr_set:
            ret_vec.append(BPNN.sigsign(self.forward(point)[0]))
        return ret_vec

    # -------------------------------------------------------------------------
    def fit(self, _train_set, max_epochs=10000):
        "Each element of _train_set must be formatted as [[x1, x2, ...], label]."
        assert len(_train_set) > 0 and len(_train_set[0]) == 2
        counter, go_through = 0, False
        while counter <= max_epochs:
            if self.total_error_var(_train_set) <= self.threshold:
                go_through = True
                break
            # 1. Pick a random training sample
            sample = rnd.choice(_train_set)
            input_vec, label = sample[0], sample[1]
            # 2. Forward pass to compute the outputs
            y_hat = self.forward(input_vec)
            # 3. Backward pass to propagate the errors
            self.backward(y_hat, [label])
            # 4. Update the parameters
            self.update_param(input_vec)
            counter += 1
        return go_through

    # -------------------------------------------------------------------------
    # Debug functions
    # -------------------------------------------------------------------------
    def debug(self):
        "Print debug information."
        print('Hidden Layers Length: ' + str(self.hidden_layers_len))
        print('Hidden Neurons: ')
        for layer_neurons in self.hidden_neurons:
            for neuron in layer_neurons:
                neuron.debug()
        for neuron in self.output_neurons:
            neuron.debug()
        print(self.weights)

    # -------------------------------------------------------------------------
    def debug_weight_index(self, cnt_list, layer, next_layer_index, current_layer_index):
        """
        Debug helper that generates weight labels; e.g. the weight connecting
        neurons 3 and 8 yields 'w_3_8'. `layer` is 0 for the input layer and
        increases by 1 per layer.
        Usage: call pair.append(self.debug_weight_index(cnt_list, i, j, k))
        inside the innermost loop that builds the weights in __init__.
        """
        # Global index where the current layer starts
        current_layer_start_index = 0
        for i in range(0, layer):
            current_layer_start_index += cnt_list[i]
        # Global index where the next layer starts
        next_layer_start_index = current_layer_start_index + cnt_list[layer]
        idx1, idx2 = current_layer_start_index + current_layer_index, \
            next_layer_start_index + next_layer_index
        #return 'w_%d_%d' % (idx1, idx2)
        return float(idx1) + idx2 / 10.0  # temporary numeric form for testing


if __name__ == '__main__':
    X = [[[0, 0], 0], [[0, 1], 1], [[1, 0], 1], [[1, 1], 0]]
    T = [[0.1, 0.085], [-0.12, 0.88], [1.13, 0.1], [0.8, 1.2]]  # expected output: [0, 1, 1, 0]
    nn = BPNN(2, 1, [2], 0.2)
    # -----test-----
    print(nn.fit(X))
    print(nn._correct_rate)
    print(nn.predict(T))
    #nn.debug()
```
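The header comment above notes that matrix multiplication was deliberately avoided so the code can mirror the per-neuron formulas from the slides. For comparison, the same forward pass collapses to one matrix product per layer. The sketch below is illustrative and not part of the original code: `forward_vectorized`, `Ws`, and `bs` are assumed names for this example. Conveniently, `BPNN.weights[l]` already has the right layout, since `weights[l][j][k]` is the weight from neuron `k` of one layer to neuron `j` of the next, i.e. row `j` of that layer's weight matrix.

```python
import numpy as np

def forward_vectorized(x, Ws, bs):
    """One matrix product per layer: a = sigmoid(W a + b).
    Ws[l] has shape (n_{l+1}, n_l), matching BPNN.weights[l];
    bs[l] collects the biases of the neurons in the next layer."""
    a = np.asarray(x, dtype=float)
    for W, b in zip(Ws, bs):
        a = 1.0 / (1.0 + np.exp(-(np.asarray(W) @ a + np.asarray(b))))
    return a

# Same shape as the XOR network in __main__ (2 inputs, 2 hidden, 1 output);
# the weight and bias values here are arbitrary.
Ws = [[[0.5, -0.4], [0.3, 0.8]], [[1.2, -0.7]]]
bs = [[0.1, -0.2], [0.05]]
print(forward_vectorized([1.0, 0.0], Ws, bs))
```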