Assignment 5.1

In [1]: # Import libraries
# NOTE: these MNIST/MLP imports are not used below; this notebook works with
# the IMDB dataset and re-imports what it needs from keras in later cells.
from tensorflow import keras
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.optimizers import RMSprop

In [2]: import keras
keras.__version__

Out[2]: '2.5.0'

In [3]: # Load the dataset, keeping only the 10,000 most frequent words
from keras.datasets import imdb

(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)

<__array_function__ internals>:5: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
C:\Users\walee\anaconda3\lib\site-packages\keras\datasets\imdb.py:155: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
  x_train, y_train = np.array(xs[:idx]), np.array(labels[:idx])
C:\Users\walee\anaconda3\lib\site-packages\keras\datasets\imdb.py:156: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
  x_test, y_test = np.array(xs[idx:]), np.array(labels[idx:])

In [4]: train_data[0]

Out[4]:
[1, 14, 22, 16, 43, 530, 973, 1622, 1385, 65, 458, 4468, 66, 3941, 4, 173, 36, 256, 5, 25,
 100, 43, 838, 112, 50, 670, 2, 9, 35, 480, 284, 5, 150, 4, 172, 112, 167, 2, 336, 385,
 39, 4, 172, 4536, 1111, 17, 546, 38, 13, 447, 4, 192, 50, 16, 6, 147, 2025, 19, 14, 22,
 4, 1920, 4613, 469, 4, 22, 71, 87, 12, 16, 43, 530, 38, 76, 15, 13, 1247, 4, 22, 17,
 515, 17, 12, 16, 626, 18, 2, 5, 62, 386, 12, 8, 316, 8, 106, 5, 4, 2223, 5244, 16, 480,
 66, 3785, 33, 4, 130, 12, 16, 38, 619, 5, 25, 124, 51, 36, 135, 48, 25, 1415, 33, 6, 22,
 12, 215, 28, 77, 52, 5, 14, 407, 16, 82, 2, 8, 4, 107, 117, 5952, 15, 256, 4, 2, 7,
 3766, 5, 723, 36, 71, 43, 530, 476, 26, 400, 317, 46, 7, 4, 2, 1029, 13, 104, 88, 4,
 381, 15, 297, 98, 32, 2071, 56, 26, 141, 6, 194, 7486, 18, 4, 226, 22, 21, 134, 476,
 26, 480, 5, 144, 30, 5535, 18, 51, 36, 28, 224, 92, 25, 104, 4, 226, 65, 16, 38, 1334,
 88, 12, 16, 283, 5, 16, 4472, 113, 103, 32, 15, 16, 5345, 19, 178, 32]
In [5]: train_labels[0]

Out[5]: 1

In [6]: # Because only the 10,000 most frequent words are kept, no word index will exceed 9,999:
max([max(sequence) for sequence in train_data])

Out[6]: 9999

In [7]: # word_index is a dictionary mapping words to an integer index
word_index = imdb.get_word_index()
# We reverse it, mapping integer indices to words
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
# We decode the review; note that our indices are offset by 3
# because 0, 1 and 2 are reserved indices for "padding", "start of sequence", and "unknown"
decoded_review = ' '.join([reverse_word_index.get(i - 3, '?') for i in train_data[0]])
decoded_review

Out[7]: "? this film was just brilliant casting location scenery story direction everyone's really suited the part they played and you could just imagine being there robert ? is an amazing actor and now the same being director ? father came from the same scottish island as myself so i loved the fact there was a real connection with this film the witty remarks throughout the film were great it was just brilliant so much that i bought the film as soon as it was released for ? and would recommend it to everyone to watch and the fly fishing was amazing really cried at the end it was so sad and you know what they say if you cry at a film it must have been good and this definitely was also ? to the two little boy's that played the ? of norman and paul they were just brilliant children are often left out of the ? list i think because the stars that play them all grown up are such a big profile for the whole film but these children are amazing and should be praised for what they have done don't you think the whole story was so lovely because it was true and was someone's life after all that was shared with us all"
In [8]: import numpy as np

def vectorize_sequences(sequences, dimension=10000):
    # Create an all-zero matrix of shape (len(sequences), dimension)
    results = np.zeros((len(sequences), dimension))
    for i, sequence in enumerate(sequences):
        results[i, sequence] = 1.  # set specific indices of results[i] to 1s
    return results

# training data
x_train = vectorize_sequences(train_data)
# test data
x_test = vectorize_sequences(test_data)

In [9]: # sample train data
x_train[0]

Out[9]: array([0., 1., 1., ..., 0., 0., 0.])
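As a quick check of the encoding (an illustrative addition, not part of the original assignment): every word index occurring in the first review should map to a 1 in its vector, and the number of 1s should equal the number of distinct indices, since duplicate words collapse onto the same position.

# Illustrative sanity check of the multi-hot encoding (not in the original):
assert all(x_train[0][i] == 1.0 for i in train_data[0])
assert x_train[0].sum() == len(set(train_data[0]))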
In [10]: # Vectorized labels
y_train = np.asarray(train_labels).astype('float32')
y_test = np.asarray(test_labels).astype('float32')
In [11]: # The Keras implementation
from keras import models
from keras import layers

model = models.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
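A convenient way to inspect the architecture at this point (an added step, not in the original notebook) is model.summary(), which Keras provides on any built model. For these layers the parameter counts work out to 10000*16 + 16 = 160,016, 16*16 + 16 = 272, and 16*1 + 1 = 17.

# Added for inspection (not in the original): print each layer's output
# shape and parameter count (160,016 + 272 + 17 = 160,305 trainable params).
model.summary()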

In [12]: model.compile(optimizer='rmsprop',
                       loss='binary_crossentropy',
                       metrics=['accuracy'])

In [13]: from keras import optimizers

model.compile(optimizer=optimizers.RMSprop(lr=0.001),
              loss='binary_crossentropy',
              metrics=['accuracy'])

from keras import losses
from keras import metrics

model.compile(optimizer=optimizers.RMSprop(lr=0.001),
              loss=losses.binary_crossentropy,
              metrics=[metrics.binary_accuracy])

C:\Users\walee\anaconda3\lib\site-packages\tensorflow\python\keras\optimizer_v2\optimizer_v2.py:374: UserWarning: The `lr` argument is deprecated, use `learning_rate` instead.
  warnings.warn(
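Per the UserWarning above, `lr` was renamed in recent Keras versions. The equivalent call with the current argument name would be (a drop-in sketch, not from the original notebook):

# Equivalent compile with the non-deprecated argument name (sketch):
model.compile(optimizer=optimizers.RMSprop(learning_rate=0.001),
              loss=losses.binary_crossentropy,
              metrics=[metrics.binary_accuracy])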

In [14]: x_val = x_train[:10000]
partial_x_train = x_train[10000:]

y_val = y_train[:10000]
partial_y_train = y_train[10000:]

In [15]: history = model.fit(partial_x_train,
                             partial_y_train,
                             epochs=20,
                             batch_size=512,
                             validation_data=(x_val, y_val))

Epoch 1/20
30/30 [==============================] - 63s 419ms/step - loss: 0.6034 - binary_accuracy: 0.6815 - val_loss: 0.3960 - val_binary_accuracy: 0.8684
Epoch 2/20
30/30 [==============================] - 1s 35ms/step - loss: 0.3324 - binary_accuracy: 0.9012 - val_loss: 0.3173 - val_binary_accuracy: 0.8795
Epoch 3/20
30/30 [==============================] - 1s 30ms/step - loss: 0.2365 - binary_accuracy: 0.9290 - val_loss: 0.2801 - val_binary_accuracy: 0.8919
Epoch 4/20
30/30 [==============================] - 1s 27ms/step - loss: 0.1827 - binary_accuracy: 0.9437 - val_loss: 0.2915 - val_binary_accuracy: 0.8844
Epoch 5/20
30/30 [==============================] - 1s 23ms/step - loss: 0.1464 - binary_accuracy: 0.9582 - val_loss: 0.2773 - val_binary_accuracy: 0.8912
Epoch 6/20
30/30 [==============================] - 1s 29ms/step - loss: 0.1130 - binary_accuracy: 0.9705 - val_loss: 0.2915 - val_binary_accuracy: 0.8887
Epoch 7/20
30/30 [==============================] - 1s 26ms/step - loss: 0.0957 - binary_accuracy: 0.9756 - val_loss: 0.3162 - val_binary_accuracy: 0.8774
Epoch 8/20
30/30 [==============================] - 1s 22ms/step - loss: 0.0800 - binary_accuracy: 0.9801 - val_loss: 0.3175 - val_binary_accuracy: 0.8851
Epoch 9/20
30/30 [==============================] - 1s 22ms/step - loss: 0.0627 - binary_accuracy: 0.9852 - val_loss: 0.3557 - val_binary_accuracy: 0.8737
Epoch 10/20
30/30 [==============================] - 1s 22ms/step - loss: 0.0510 - binary_accuracy: 0.9884 - val_loss: 0.3607 - val_binary_accuracy: 0.8816
Epoch 11/20
30/30 [==============================] - 1s 22ms/step - loss: 0.0378 - binary_accuracy: 0.9934 - val_loss: 0.4033 - val_binary_accuracy: 0.8749
Epoch 12/20
30/30 [==============================] - 1s 23ms/step - loss: 0.0323 - binary_accuracy: 0.9940 - val_loss: 0.4143 - val_binary_accuracy: 0.8803
Epoch 13/20
30/30 [==============================] - 1s 23ms/step - loss: 0.0234 - binary_accuracy: 0.9970 - val_loss: 0.4700 - val_binary_accuracy: 0.8665
Epoch 14/20
30/30 [==============================] - 1s 22ms/step - loss: 0.0174 - binary_accuracy: 0.9984 - val_loss: 0.4743 - val_binary_accuracy: 0.8756
Epoch 15/20
30/30 [==============================] - 1s 24ms/step - loss: 0.0116 - binary_accuracy: 0.9995 - val_loss: 0.5077 - val_binary_accuracy: 0.8741
Epoch 16/20
30/30 [==============================] - 1s 25ms/step - loss: 0.0088 - binary_accuracy: 0.9996 - val_loss: 0.5462 - val_binary_accuracy: 0.8709
Epoch 17/20
30/30 [==============================] - 1s 29ms/step - loss: 0.0076 - binary_accuracy: 0.9993 - val_loss: 0.5741 - val_binary_accuracy: 0.8708
Epoch 18/20
30/30 [==============================] - 1s 28ms/step - loss: 0.0041 - binary_accuracy: 1.0000 - val_loss: 0.6180 - val_binary_accuracy: 0.8657
Epoch 19/20
30/30 [==============================] - 1s 27ms/step - loss: 0.0063 - binary_accuracy: 0.9989 - val_loss: 0.6356 - val_binary_accuracy: 0.8692
Epoch 20/20
30/30 [==============================] - 1s 34ms/step - loss: 0.0025 - binary_accuracy: 0.9999 - val_loss: 0.6637 - val_binary_accuracy: 0.8687
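The log shows textbook overfitting: training loss keeps shrinking while val_loss bottoms out around epochs 3 to 5 and then climbs steadily. A standard remedy, sketched here as an addition (the original notebook instead retrains for a fixed 4 epochs below), is Keras's built-in EarlyStopping callback on a freshly built model:

# Sketch, not in the original notebook: with a freshly compiled model, the
# same fit call plus EarlyStopping would halt once val_loss stops improving
# for 2 consecutive epochs and restore the best weights seen so far.
from tensorflow.keras.callbacks import EarlyStopping

early_stop = EarlyStopping(monitor='val_loss', patience=2,
                           restore_best_weights=True)
history = model.fit(partial_x_train, partial_y_train,
                    epochs=20, batch_size=512,
                    validation_data=(x_val, y_val),
                    callbacks=[early_stop])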

In [16]: history_dict = history.history
history_dict.keys()
print(history_dict)

{'loss': [0.52616947889328, 0.31405124068260193, 0.23073254525661469, 0.18089142441749573, 0.15155072510242462, 0.11920173466205597, 0.09928888082504272, 0.08016802370548248, 0.06698485463857651, 0.05347074940800667, 0.04215071722865105, 0.03208116069436073, 0.024609368294477463, 0.020660480484366417, 0.012686074711382389, 0.009793567471206188, 0.008989308960735798, 0.004304599016904831, 0.008949910290539265, 0.002322724089026451],
 'binary_accuracy': [0.7734000086784363, 0.902733325958252, 0.9275333285331726, 0.9417999982833862, 0.9536666870117188, 0.9657999873161316, 0.972266674041748, 0.9791333079338074, 0.9835333228111267, 0.9874666929244995, 0.9911333322525024, 0.9941333532333374, 0.9962666630744934, 0.9971333146095276, 0.9986666440963745, 0.9995999932289124, 0.9989333152770996, 0.9998666644096375, 0.9980666637420654, 0.9999333620071411],
 'val_loss': [0.3960328698158264, 0.3172730803489685, 0.2800559401512146, 0.29149702191352844, 0.27728235721588135, 0.2915444076061249, 0.3162406086921692, 0.31750768423080444, 0.3556799292564392, 0.3607264757156372, 0.4032689929008484, 0.4143247604370117, 0.47000113129615784, 0.4742533564567566, 0.5077142715454102, 0.5462456345558167, 0.5740538835525513, 0.6180163621902466, 0.6355932950973511, 0.6637150645256042],
 'val_binary_accuracy': [0.868399977684021, 0.8794999718666077, 0.8919000029563904, 0.8844000101089478, 0.8912000060081482, 0.888700008392334, 0.8773999810218811, 0.8851000070571899, 0.8737000226974487, 0.881600022315979, 0.8748999834060669, 0.880299985408783, 0.8665000200271606, 0.8755999803543091, 0.8741000294685364, 0.8708999752998352, 0.8708000183105469, 0.8657000064849854, 0.8691999912261963, 0.8687000274658203]}
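One short added step (not in the original notebook) makes the retraining choice in In [19] explicit: reading the best epoch directly off the history dict.

# Added sketch: locate the epoch with the lowest validation loss.
best_epoch = int(np.argmin(history_dict['val_loss'])) + 1  # epochs are 1-indexed
print(best_epoch)  # 5 for the run above; the notebook retrains for 4 epochs,
                   # consistent with where val_loss bottoms out in the plot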

In [17]: import matplotlib.pyplot as plt

acc = history.history['binary_accuracy']
val_acc = history.history['val_binary_accuracy']
#acc = history.history['acc']          # key names under older Keras versions
#val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(1, len(acc) + 1)

# "bo" is for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss')
# "b" is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()

In [18]: plt.clf()  # clear figure
#acc_values = history_dict['acc']
#val_acc_values = history_dict['val_acc']
acc = history.history['binary_accuracy']
val_acc = history.history['val_binary_accuracy']

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

In [19]: model = models.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))

model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])

model.fit(x_train, y_train, epochs=4, batch_size=512)
results = model.evaluate(x_test, y_test)

Epoch 1/4
49/49 [==============================] - 4s 22ms/step - loss: 0.5358 - accuracy: 0.7571
Epoch 2/4
49/49 [==============================] - 1s 16ms/step - loss: 0.2672 - accuracy: 0.9079
Epoch 3/4
49/49 [==============================] - 1s 17ms/step - loss: 0.1949 - accuracy: 0.9321
Epoch 4/4
49/49 [==============================] - 1s 16ms/step - loss: 0.1659 - accuracy: 0.9411
782/782 [==============================] - 8s 2ms/step - loss: 0.3059 - accuracy: 0.8796

In [20]: results

Out[20]: [0.3059064447879791, 0.8795599937438965]

In [21]: model.predict(x_test)

Out[21]:
array([[0.22144157],
       [0.999939  ],
       [0.9658772 ],
       ...,
       [0.16461223],
       [0.10813493],
       [0.617234  ]], dtype=float32)
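model.predict returns sigmoid probabilities for the positive class. A small added step (not in the original notebook) turns them into hard 0/1 labels and reproduces the evaluate() accuracy by hand:

# Added sketch: threshold probabilities at 0.5 to get class labels.
pred_labels = (model.predict(x_test) > 0.5).astype('int32').ravel()
print((pred_labels == y_test).mean())  # ~0.88, matching evaluate() above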
In [ ]:
