multi_classification

Analyzing a multi-class classification problem with several models:


Softmax linear classifier

'''
import numpy as np
import matplotlib.pyplot as plt

N = 100  # number of points per class
D = 2    # dimensionality
K = 3    # number of classes
X = np.zeros((N*K, D))            # sample inputs
y = np.zeros(N*K, dtype='uint8')  # class labels
for j in range(K):
    ix = range(N*j, N*(j+1))
    r = np.linspace(0.0, 1, N)  # radius
    t = np.linspace(j*4, (j+1)*4, N) + np.random.randn(N)*0.2  # theta
    X[ix] = np.c_[r*np.sin(t), r*np.cos(t)]
    y[ix] = j

# visualize our sample points
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.Spectral)
#plt.show()

W = 0.01 * np.random.randn(D, K)
b = np.zeros((1, K))

# step size and regularization strength, chosen by hand
step_size = 1e-0
reg = 1e-3  # regularization strength

# gradient descent loop
num_examples = X.shape[0]
for i in range(200):
    # compute class scores; the result is an [N x K] matrix
    scores = np.dot(X, W) + b
    # compute class probabilities
    exp_scores = np.exp(scores)
    probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)  # [N x K]
    # compute the loss (cross-entropy loss plus regularization)
    correct_logprobs = -np.log(probs[range(num_examples), y])
    data_loss = np.sum(correct_logprobs) / num_examples
    reg_loss = 0.5*reg*np.sum(W*W)
    loss = data_loss + reg_loss
    if i % 10 == 0:
        print("iteration %d: loss %f" % (i, loss))
    # gradient on the scores
    dscores = probs
    dscores[range(num_examples), y] -= 1
    dscores /= num_examples
    # backpropagate the gradient into the parameters
    dW = np.dot(X.T, dscores)
    db = np.sum(dscores, axis=0, keepdims=True)
    dW += reg*W  # regularization gradient
    # parameter update
    W += -step_size * dW
    b += -step_size * db

# evaluate training accuracy
scores = np.dot(X, W) + b
predicted_class = np.argmax(scores, axis=1)
print('training accuracy: %.2f' % (np.mean(predicted_class == y)))
'''
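The line `dscores[range(num_examples), y] -= 1` implements the gradient of the softmax cross-entropy loss with respect to the scores. For a single example with score vector s and correct class y, this is the standard derivation, stated here for reference:

'''
p_k = \frac{e^{s_k}}{\sum_j e^{s_j}}, \qquad
L = -\log p_y, \qquad
\frac{\partial L}{\partial s_k} = p_k - \mathbf{1}[k = y]
'''

So the gradient on the scores is just the predicted probabilities with 1 subtracted at the correct class; the `dscores /= num_examples` step then averages it over the batch.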



Neural Network

A two-layer network (one ReLU hidden layer followed by a softmax output) trained on the same spiral data; unlike the linear classifier above, it can fit the non-linear class boundaries. It reuses X, y, D and K from the setup above.


'''
# random parameter initialization
h = 100  # hidden layer size
W = 0.01 * np.random.randn(D, h)
b = np.zeros((1, h))
W2 = 0.01 * np.random.randn(h, K)
b2 = np.zeros((1, K))

# hyperparameters, chosen by hand
step_size = 1e-0
reg = 1e-3  # regularization strength

# gradient descent loop
num_examples = X.shape[0]
for i in range(10000):
    hidden_layer = np.maximum(0, np.dot(X, W) + b)  # ReLU activation
    scores = np.dot(hidden_layer, W2) + b2
    # compute class probabilities
    exp_scores = np.exp(scores)
    probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)  # [N x K]
    # cross-entropy loss plus regularization term
    correct_logprobs = -np.log(probs[range(num_examples), y])
    data_loss = np.sum(correct_logprobs) / num_examples
    reg_loss = 0.5*reg*np.sum(W*W) + 0.5*reg*np.sum(W2*W2)
    loss = data_loss + reg_loss
    if i % 1000 == 0:
        print("iteration %d: loss %f" % (i, loss))
    # gradient on the scores
    dscores = probs
    dscores[range(num_examples), y] -= 1
    dscores /= num_examples
    # backpropagate into the second layer's parameters
    dW2 = np.dot(hidden_layer.T, dscores)
    db2 = np.sum(dscores, axis=0, keepdims=True)
    # backpropagate into the hidden layer
    dhidden = np.dot(dscores, W2.T)
    dhidden[hidden_layer <= 0] = 0  # ReLU gradient: zero where the unit was inactive
    # finally, the gradients on W and b
    dW = np.dot(X.T, dhidden)
    db = np.sum(dhidden, axis=0, keepdims=True)
    # add the regularization gradient
    dW2 += reg * W2
    dW += reg * W
    # parameter update
    W += -step_size * dW
    b += -step_size * db
    W2 += -step_size * dW2
    b2 += -step_size * db2

# evaluate classification accuracy
hidden_layer = np.maximum(0, np.dot(X, W) + b)
scores = np.dot(hidden_layer, W2) + b2
predicted_class = np.argmax(scores, axis=1)
print('training accuracy: %.2f' % (np.mean(predicted_class == y)))
'''
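Not in the original post, but a common way to see what the two-layer network has learned is to shade its decision regions over a mesh grid. A minimal sketch, assuming the trained W, b, W2, b2 (plus X, y, np, plt) from the block above are still in scope:

'''
# Hypothetical visualization (an addition, not part of the original code):
# evaluate the trained 2-layer net on a dense grid and shade the decision regions.
grid_step = 0.02
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, grid_step),
                     np.arange(y_min, y_max, grid_step))
grid = np.c_[xx.ravel(), yy.ravel()]
Z = np.argmax(np.dot(np.maximum(0, np.dot(grid, W) + b), W2) + b2, axis=1)
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.Spectral)
plt.show()
'''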
