Using the code below, I need help with the following 3 things:
1) Write Python code to plot the images from the first epoch. Take a screenshot of the images from the first epoch.
2) Write Python code to plot the images from the last epoch. Take a screenshot of the images from the last epoch.
3) Comment on the network performance.
#Step 1: Import the required Python libraries:
import numpy as np
import matplotlib.pyplot as plt
import keras
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers import LeakyReLU
from keras.layers import UpSampling2D, Conv2D  #keras.layers.convolutional is not available in newer Keras releases
from keras.models import Sequential, Model
from keras.optimizers import Adam, SGD
from keras.datasets import cifar10
#Step 2: Load the data.
#Loading the CIFAR10 data
(X, y), (_, _) = keras.datasets.cifar10.load_data()
#Selecting a single class of images
#(class 8 corresponds to ships; any label
#between 0 and 9 could be used instead)
X = X[y.flatten() == 8]
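As an optional sanity check (not part of the original code), you can confirm what the filter kept; CIFAR-10 has 5,000 training images per class, and class 8 is the ship class:

#Optional check: the selection above should keep the 5,000 ship images
print(X.shape)   #expected: (5000, 32, 32, 3)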
#Step 3: Define parameters to be used in later processes.
#Defining the Input shape
image_shape = (32, 32, 3)
latent_dimensions = 100
#Step 4: Define a utility function to build the generator.
def build_generator():
    model = Sequential()
    #Building the input layer
    model.add(Dense(128 * 8 * 8, activation="relu",
                    input_dim=latent_dimensions))
    model.add(Reshape((8, 8, 128)))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=3, padding="same"))
    model.add(BatchNormalization(momentum=0.78))
    model.add(Activation("relu"))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=3, padding="same"))
    model.add(BatchNormalization(momentum=0.78))
    model.add(Activation("relu"))
    model.add(Conv2D(3, kernel_size=3, padding="same"))
    model.add(Activation("tanh"))
    #Generating the output image
    noise = Input(shape=(latent_dimensions,))
    image = model(noise)
    return Model(noise, image)
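A quick, optional way to verify the generator's geometry (the variable g below is only for this check and is not used elsewhere): the 100-dimensional noise vector is reshaped to 8x8 and upsampled twice, so the output should be a 32x32x3 image with tanh values in [-1, 1].

#Optional check: the generator should map (latent_dimensions,) noise to a (32, 32, 3) image
g = build_generator()
g.summary()
print(g.output_shape)   #expected: (None, 32, 32, 3)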
#Step 5: Define a utility function to build the discriminator.
def build_discriminator():
    #Building the convolutional layers
    #to classify whether an image is real or fake
    model = Sequential()
    model.add(Conv2D(32, kernel_size=3, strides=2,
                     input_shape=image_shape, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
    model.add(ZeroPadding2D(padding=((0,1),(0,1))))
    model.add(BatchNormalization(momentum=0.82))
    model.add(LeakyReLU(alpha=0.25))
    model.add(Dropout(0.25))
    model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
    model.add(BatchNormalization(momentum=0.82))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(LeakyReLU(alpha=0.25))
    model.add(Dropout(0.25))
    #Building the output layer
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    image = Input(shape=image_shape)
    validity = model(image)
    return Model(image, validity)
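Likewise, an optional check of the discriminator (the variable d is again only illustrative): it should take a 32x32x3 image and return a single sigmoid probability that the image is real.

#Optional check: the discriminator should map a (32, 32, 3) image to one probability
d = build_discriminator()
d.summary()
print(d.output_shape)   #expected: (None, 1)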
#Step 6: Define a utility function to display the generated images.
def display_images():
    r, c = 4, 4
    noise = np.random.normal(0, 1, (r * c, latent_dimensions))
    generated_images = generator.predict(noise)
    #Scaling the generated images
    generated_images = 0.5 * generated_images + 0.5
    fig, axs = plt.subplots(r, c)
    count = 0
    for i in range(r):
        for j in range(c):
            axs[i, j].imshow(generated_images[count])
            axs[i, j].axis('off')
            count += 1
    plt.show()
    plt.close()
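Since the assignment asks for screenshots of the images from the first and the last epoch, it can be more convenient to also write the grids to disk. The helper below is one possible sketch; save_images() and its filename pattern are additions, not part of the original code:

#Optional helper: same grid as display_images(), but saved to a PNG file
def save_images(epoch):
    r, c = 4, 4
    noise = np.random.normal(0, 1, (r * c, latent_dimensions))
    #Rescaling the tanh output from [-1, 1] to [0, 1] for plotting
    generated_images = 0.5 * generator.predict(noise) + 0.5
    fig, axs = plt.subplots(r, c)
    count = 0
    for i in range(r):
        for j in range(c):
            axs[i, j].imshow(generated_images[count])
            axs[i, j].axis('off')
            count += 1
    fig.savefig("gan_images_epoch_%d.png" % epoch)
    plt.close(fig)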
#Step 7: Build the GAN.
# Building and compiling the discriminator
discriminator = build_discriminator()
discriminator.compile(loss='binary_crossentropy',
                      optimizer=Adam(0.0002, 0.5),
                      metrics=['accuracy'])
#Freezing the discriminator's weights inside the combined model
#so that only the generator is updated when the combined network is trained
#(the standalone discriminator keeps the settings it was compiled with above)
discriminator.trainable = False
# Building the generator
generator = build_generator()
#Defining the input for the generator
#and generating the images
z = Input(shape=(latent_dimensions,))
image = generator(z)
#Checking the validity of the generated image
valid = discriminator(image)
#Defining the combined model of the generator and the discriminator
combined_network = Model(z, valid)
combined_network.compile(loss='binary_crossentropy',
                         optimizer=Adam(0.0002, 0.5))
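Optionally (not in the original code), you can confirm that freezing the discriminator worked as intended: the combined network should expose only the generator's trainable weights, so the two counts below should match.

#Optional check: the combined network should train only the generator's weights
print(len(generator.trainable_weights))
print(len(combined_network.trainable_weights))   #should equal the number above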
#Step 8: Train the network.
num_epochs = 15000      #each "epoch" here is a single batch update, not a full pass over the data
batch_size = 32
display_interval = 2500
losses = []
#Normalizing the input to [-1, 1] to match the generator's tanh output
X = (X / 127.5) - 1.
#Defining the adversarial ground truths
valid = np.ones((batch_size, 1))
#Adding some noise to the labels
valid += 0.05 * np.random.random(valid.shape)
fake = np.zeros((batch_size, 1))
fake += 0.05 * np.random.random(fake.shape)
for epoch in range(num_epochs):
    #Training the discriminator
    #Sampling a random batch of real images
    index = np.random.randint(0, X.shape[0], batch_size)
    images = X[index]
    #Sampling noise and generating a batch of new images
    noise = np.random.normal(0, 1, (batch_size, latent_dimensions))
    generated_images = generator.predict(noise, verbose=0)
    #Training the discriminator to distinguish real images
    #from generated ones
    discm_loss_real = discriminator.train_on_batch(images, valid)
    discm_loss_fake = discriminator.train_on_batch(generated_images, fake)
    discm_loss = 0.5 * np.add(discm_loss_real, discm_loss_fake)
    #Training the generator to produce images
    #that pass the authenticity test
    genr_loss = combined_network.train_on_batch(noise, valid)
    #Tracking the progress
    losses.append((discm_loss[0], genr_loss))
    if epoch % display_interval == 0:
        display_images()
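For the three questions themselves, one possible approach (a sketch, not the only correct answer) is: 1) the grid shown when epoch == 0 is the set of images from the first epoch, so the first display_images() call inside the loop already covers it; 2) the last epoch is never hit by the display_interval check (14999 is not a multiple of 2500), so call display_images() once more after the loop; 3) the loss history accumulated in losses can be plotted to support the comment on performance. Screenshots can be taken of the displayed figures, or save_images() above can be used to write them to disk.

#2) Images from the last epoch: sample the fully trained generator after the loop
display_images()

#Plotting the discriminator and generator losses recorded during training
losses = np.array(losses)
plt.figure()
plt.plot(losses[:, 0], label="Discriminator loss")
plt.plot(losses[:, 1], label="Generator loss")
plt.xlabel("Training iteration")
plt.ylabel("Loss")
plt.legend()
plt.show()

3) On performance: in a typical run of this setup, the first-epoch grid is unstructured colour noise, while the final grid usually shows blurry but recognisable ship-like shapes and colours; the two losses tend to oscillate around each other rather than converge, which is normal adversarial behaviour. The actual comment should be based on the screenshots and loss curve from your own run.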