import keras
import numpy as np
import matplotlib.pyplot as plt
# Load the Reuters newswire dataset

from keras.datasets import reuters
(train_data, train_labels), (test_data, test_labels) = reuters.load_data(num_words=10000)

>>

Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/reuters.npz 2110848/2110848 [==============================] - 0s 0us/step

 

train_data.shape

>> (8982,)

 

train_data[0]

>> [1, 2, 2, 8, 43, 10, 447, 5, 25, 207, 270, 5, 3095, 111, 16, 369, 186, 90, 67, 7, 89, 5, 19, 102, 6,

...

6, 109, 15, 17, 12]
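
Each sample is a list of word indices. Because the data was loaded with num_words=10000, no index should exceed 9,999; a quick check (a minimal sketch, not part of the original listing):

# The vocabulary is capped at the 10,000 most frequent words
max(max(sequence) for sequence in train_data)  # expected: 9999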

 

word_index = reuters.get_word_index()
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])

# Indices are offset by 3 because 0, 1, and 2 are reserved for
# "padding", "start of sequence", and "unknown"
decoded_newswire = ' '.join([reverse_word_index.get(i - 3, '?') for i in train_data[0]])

>>

Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/reuters_word_index.json 550378/550378 [==============================] - 0s 0us/step

 

decoded_newswire

>> '? ? ? said as a result of its december acquisition of space co it expects earnings

train_labels[0]

>> 3
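
The label for each newswire is an integer from 0 to 45, so there are 46 mutually exclusive topics; this is where the 46 in the label encoding and the output layer below comes from. A quick check (a minimal sketch):

# Labels span 46 topic classes, indexed 0 through 45
train_labels.max()  # expected: 45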

 

# Preparing the data
def vectorize_sequences(sequences, dimension=10000):
  # All-zero matrix of shape (len(sequences), dimension)
  results = np.zeros((len(sequences), dimension))

  for i, sequence in enumerate(sequences):
    # Set the positions of the word indices in results[i] to 1 (multi-hot)
    results[i, sequence] = 1

  return results

# Vectorize the training and test data
x_train = vectorize_sequences(train_data)
x_test = vectorize_sequences(test_data)
train_labels

>> array([ 3, 4, 3, ..., 25, 3, 25])
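
Before encoding the labels, a quick sanity check on the vectorized inputs (a minimal sketch): each newswire is now a fixed-length multi-hot vector.

# One 10,000-dimensional row per newswire
x_train.shape  # expected: (8982, 10000)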

# One-hot encoding of the label data
def to_one_hot(labels, dimension=46):
  results = np.zeros((len(labels), dimension))

  for i, label in enumerate(labels):
    # Set the position of the label index in results[i] to 1
    results[i, label] = 1

  return results

one_hot_train_labels = to_one_hot(train_labels)
one_hot_test_labels = to_one_hot(test_labels)


# The same categorical encoding with Keras' built-in helper
from keras.utils import to_categorical

one_hot_train_labels = to_categorical(train_labels)
one_hot_test_labels = to_categorical(test_labels)
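
The hand-rolled encoder and the built-in helper are interchangeable here; a quick check (a minimal sketch):

# Both encodings should agree element-wise on these labels
assert np.array_equal(to_one_hot(train_labels), to_categorical(train_labels))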


# Building the network
# Exercise: change the 64 in the second Dense layer to 4 and re-run (see the bottleneck sketch after the summary)

from keras import models
from keras import layers

model = models.Sequential()
model.add(layers.Dense(64, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(46, activation='softmax'))

model.summary()

>> (summary: three Dense layers with 640,064 + 4,160 + 2,990 = 647,214 trainable parameters)
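
The exercise in the comment above is worth sketching out: with only 4 units in the second hidden layer, 46-way class information has to squeeze through a 4-dimensional space, and validation accuracy drops noticeably. A minimal sketch of that variant (small_model is our name, not from the original listing):

# Bottleneck variant: a 4-unit layer is too small for 46-way class information
small_model = models.Sequential()
small_model.add(layers.Dense(64, activation='relu', input_shape=(10000,)))
small_model.add(layers.Dense(4, activation='relu'))
small_model.add(layers.Dense(46, activation='softmax'))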

model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
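
As an aside, one-hot encoding the labels is not the only option: with the sparse_categorical_crossentropy loss, Keras accepts the integer labels directly (the loss is mathematically identical). A minimal sketch on a fresh copy of the architecture (alt_model is our name):

# Alternative: keep integer labels and use the sparse variant of the loss
alt_model = models.Sequential()
alt_model.add(layers.Dense(64, activation='relu', input_shape=(10000,)))
alt_model.add(layers.Dense(64, activation='relu'))
alt_model.add(layers.Dense(46, activation='softmax'))
alt_model.compile(optimizer='rmsprop',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
# alt_model.fit(x_train, train_labels, ...) then takes the raw integer labels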
# Split the training data: hold out the first 1,000 samples as a validation set

x_val = x_train[:1000]
partial_x_train = x_train[1000:]
y_val = one_hot_train_labels[:1000]
partial_y_train = one_hot_train_labels[1000:]
history = model.fit(partial_x_train, 
                    partial_y_train,
                    epochs=20,
                    batch_size=512,
                    validation_data=(x_val, y_val))
>>

Epoch 1/20
16/16 [==============================] - 2s 67ms/step - loss: 2.4923 - accuracy: 0.5499 - val_loss: 1.6341 - val_accuracy: 0.6610
Epoch 2/20
16/16 [==============================] - 1s 53ms/step - loss: 1.3371 - accuracy: 0.7166 - val_loss: 1.2449 - val_accuracy: 0.7140
Epoch 3/20
16/16 [==============================] - 1s 55ms/step - loss: 0.9899 - accuracy: 0.7922 - val_loss: 1.1182 - val_accuracy: 0.7470
Epoch 4/20
16/16 [==============================] - 1s 54ms/step - loss: 0.7818 - accuracy: 0.8383 - val_loss: 1.0020 - val_accuracy: 0.7930
Epoch 5/20
16/16 [==============================] - 1s 57ms/step - loss: 0.6183 - accuracy: 0.8732 - val_loss: 0.9504 - val_accuracy: 0.8120
Epoch 6/20
16/16 [==============================] - 1s 56ms/step - loss: 0.4971 - accuracy: 0.9008 - val_loss: 0.9023 - val_accuracy: 0.8120
Epoch 7/20
16/16 [==============================] - 1s 55ms/step - loss: 0.3934 - accuracy: 0.9202 - val_loss: 0.9013 - val_accuracy: 0.8090
Epoch 8/20
16/16 [==============================] - 1s 55ms/step - loss: 0.3245 - accuracy: 0.9316 - val_loss: 0.9093 - val_accuracy: 0.8090
Epoch 9/20
16/16 [==============================] - 1s 52ms/step - loss: 0.2670 - accuracy: 0.9399 - val_loss: 0.8931 - val_accuracy: 0.8150
Epoch 10/20
16/16 [==============================] - 1s 54ms/step - loss: 0.2295 - accuracy: 0.9466 - val_loss: 0.8993 - val_accuracy: 0.8120
Epoch 11/20
16/16 [==============================] - 1s 53ms/step - loss: 0.1961 - accuracy: 0.9528 - val_loss: 0.9203 - val_accuracy: 0.8160
Epoch 12/20
16/16 [==============================] - 1s 53ms/step - loss: 0.1737 - accuracy: 0.9543 - val_loss: 0.9683 - val_accuracy: 0.8070
Epoch 13/20
...
Epoch 19/20
16/16 [==============================] - 1s 53ms/step - loss: 0.1153 - accuracy: 0.9573 - val_loss: 1.0541 - val_accuracy: 0.8090
Epoch 20/20
16/16 [==============================] - 1s 53ms/step - loss: 0.1104 - accuracy: 0.9577 - val_loss: 1.1655 - val_accuracy: 0.7970
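
With training done, the model can be scored on the held-out test set; a minimal sketch (exact numbers vary from run to run, so none are shown here):

# Score the trained model on the test data
test_loss, test_acc = model.evaluate(x_test, one_hot_test_labels)
print(test_loss, test_acc)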

 

# Retrieve the training history
loss = history.history['loss']
val_loss = history.history['val_loss']
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

epochs = range(1, len(loss) + 1)
# Visualize the loss curves

plt.plot(epochs, loss, 'bo', label='Training Loss')
plt.plot(epochs, val_loss, 'b-', label='Validation Loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()

# Visualize the accuracy curves
plt.clf()  # clear the loss figure so the two plots don't draw on the same axes
plt.plot(epochs, acc, 'bo', label='Training Accuracy')
plt.plot(epochs, val_acc, 'b-', label='Validation Accuracy')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
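
The curves show validation loss bottoming out around epoch 9 while training loss keeps falling, the usual signature of overfitting. Finally, the trained model can produce a probability distribution over the 46 topics for any newswire; a minimal sketch:

# Predict topic probabilities for the test newswires
predictions = model.predict(x_test)
predictions[0].shape         # (46,): one probability per topic
np.sum(predictions[0])       # ~1.0, since the output layer is a softmax
np.argmax(predictions[0])    # index of the most likely topic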

 
