Deep Learning with Julia1.0 and Flux
# Slide 18: load MNIST and turn it into a feature matrix plus one-hot labels
using Flux, Flux.Data.MNIST
using Flux: onehotbatch

function prepare_dataset(;train=true)
    train_or_test = ifelse(train, :train, :test)
    imgs = MNIST.images(train_or_test)
    # flatten each 28x28 image to a 784-vector, stack into a 784xN matrix
    X = hcat(float.(vec.(imgs))...)
    labels = MNIST.labels(train_or_test)
    Y = onehotbatch(labels, 0:9)
    return X, Y
end

X, Y = prepare_dataset(train=true)
# split_dataset_random is a helper from the talk; a sketch follows below
train_X, train_Y, val_X, val_Y = split_dataset_random(X, Y)
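`split_dataset_random` is not defined on this slide. A minimal sketch of what it presumably does, assuming a shuffled 90/10 train/validation split (the ratio is my assumption):

using Random

# Hedged sketch: shuffle column indices, then split into train/val parts.
function split_dataset_random(X, Y; ratio=0.9)
    n = size(Y, 2)
    perm = randperm(n)
    ntrain = floor(Int, ratio * n)
    train_idx, val_idx = perm[1:ntrain], perm[ntrain+1:end]
    return X[:, train_idx], Y[:, train_idx], X[:, val_idx], Y[:, val_idx]
end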
# Slide 19: a three-layer MLP for 10-class classification
using Flux: Chain, Dense
using NNlib: softmax, relu

function define_model()
    mlp = Chain(Dense(28^2, 100, relu),
                Dense(100, 100, relu),
                Dense(100, 10),
                softmax)
    return mlp
end

model = define_model()
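As a quick sanity check (not on the slide, the dummy input is my assumption), the model maps one 784-element column to a 10-way probability vector:

# Hedged sketch: feed one random "image" through the untrained model.
x = rand(Float32, 28^2, 1)  # one column = one sample
y = model(x)
@show size(y)  # (10, 1)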
# Slide 20: mini-batching by slicing columns with Base.Iterators.partition
using Base.Iterators: partition

batchsize = 32
serial_iterator = partition(1:size(train_Y, 2), batchsize)
train_dataset = [(train_X[:, batch], train_Y[:, batch]) for batch in serial_iterator]

size(train_dataset[1][1]) # (784, 32)
size(train_dataset[1][2]) # (10, 32)
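`partition` walks the columns in a fixed order, so every epoch sees the same batch composition. A small variation (my addition, not on the slide) shuffles the column indices first:

using Random

# Hedged sketch: shuffle sample indices before batching.
perm = shuffle(1:size(train_Y, 2))
shuffled_iterator = partition(perm, batchsize)
train_dataset = [(train_X[:, batch], train_Y[:, batch]) for batch in shuffled_iterator]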
# Slide 21: loss, optimizer, and training loop
# (Flux 0.x / Tracker-era API: the optimizer is built from params(model))
using Flux: onecold, crossentropy, @epochs
using Flux: ADAM

loss(x, y) = crossentropy(model(x), y)
optimizer = ADAM(params(model))
epochs = 10
@epochs epochs Flux.train!(loss, train_dataset, optimizer)
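`Flux.train!` also accepts a `cb` keyword. A hedged sketch (the callback body is my assumption) that logs the validation loss at most once every ten seconds:

using Flux: throttle

# Hedged sketch: report validation loss during training, throttled.
evalcb = throttle(() -> @show(loss(val_X, val_Y)), 10)
@epochs epochs Flux.train!(loss, train_dataset, optimizer, cb = evalcb)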
# Slide 22: save the model and its raw weights with BSON
using BSON: @load, @save
using Flux: Tracker  # Tracker.data strips the tracking wrapper from parameters

pretrained = model |> cpu
weights = Tracker.data.(params(pretrained))
@save "pretrained.bson" pretrained
@save "weights.bson" weights
# Slide 23: evaluate the saved model on the test set
using Statistics: mean
using Flux: onecold

function predict()
    println("Start to evaluate testset")
    println("loading pretrained model")
    @load "pretrained.bson" pretrained
    model = pretrained
    # accuracy = fraction of samples whose argmax matches the label
    accuracy(x, y) = mean(onecold(model(x)) .== onecold(y))
    println("prepare dataset")
    X, Y = prepare_dataset(train=false)
    @show accuracy(X, Y)
    println("Done")
end

predict()
# Slide 24: move the model and data to the GPU with CuArrays
using CuArrays

model = define_model() |> gpu
train_dataset = [(train_X[:, batch] |> gpu, train_Y[:, batch] |> gpu)
                 for batch in serial_iterator]
# or, equivalently, move the already-built batches:
train_dataset = gpu.(train_dataset)
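Training then proceeds exactly as before; just remember to move the model back with `cpu` before saving, as on the BSON slide above (a hedged reminder, not on this slide):

# Hedged sketch: train on GPU, then transfer back to host memory for saving.
@epochs epochs Flux.train!(loss, train_dataset, optimizer)
pretrained = model |> cpu
@save "pretrained.bson" pretrained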
# Slide 25: prepare CIFAR10 batches with Metalhead
using Metalhead, Images
using Flux: onehotbatch
using Base.Iterators: partition

X = Metalhead.trainimgs(CIFAR10)
batchsize = 16
# channelview gives CxHxW; permute to HxWxC for Flux's WHCN convention
getarray(im) = Float64.(permutedims(channelview(im), (2, 3, 1)))
imgs = [getarray(X[i].img) for i in 1:50000]
labels = onehotbatch([X[i].ground_truth.class for i in 1:50000], 1:10)
data = [(cat(imgs[batch]..., dims=4), labels[:, batch])
        for batch in partition(1:49000, batchsize)]
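Only the first 49,000 images are batched, which suggests the last 1,000 are held out. A hedged sketch of that validation split (the `valset` name is my assumption):

# Hedged sketch: hold out the last 1,000 training images for validation.
valset = 49001:50000
val_X = cat(imgs[valset]..., dims=4)
val_Y = labels[:, valset]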
# Slide 29: transfer learning from an ImageNet-pretrained VGG19
using Flux, Metalhead
using Metalhead: classify

vgg = VGG19()  # ImageNet-pretrained weights
classify(vgg, "elephant.jpeg")

# reuse all but the last two layers, then attach a fresh 101-class head
model = Chain(vgg[1:end-2],
              Dense(4096, 101),
              softmax)
Flux.testmode!(model)
# only the later VGG layers and the new head go to the optimizer,
# so the early convolutional layers stay frozen
opt = ADAM(params(model[1][9:end], model[2:end]))
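Only the parameters handed to ADAM are updated during training. A hedged fine-tuning sketch under that setup (`data` standing for mini-batches of the 101-class dataset is my assumption):

# Hedged sketch: fine-tune; frozen layers receive no updates.
loss(x, y) = Flux.crossentropy(model(x), y)
Flux.train!(loss, data, opt)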
# Slide 30: a Chainer-style Dataset with an in-memory image cache
using Images  # for load() and the RGB{Normed{UInt8,8}} element type

struct Dataset
    len::Int
    data::Array{Tuple{String,Int64},1}  # (image path, label) pairs
    augment::Bool
    image_cache::Dict{Int,Array{RGB{Normed{UInt8,8}},2}}
    use_cache::Bool
    function Dataset(data; train=true)
        augment = train
        use_cache = train
        image_cache = Dict{Int,Array{RGB{Normed{UInt8,8}},2}}()
        new(length(data), data, augment, image_cache, use_cache)
    end
end

function get_example(dataset::Dataset, i::Int)
    path, label = dataset.data[i]
    if dataset.use_cache && haskey(dataset.image_cache, i)
        img = dataset.image_cache[i]
    else
        img = load(path)
        dataset.use_cache && (dataset.image_cache[i] = img)
    end
    img = copyimg(img)  # copyimg: helper from the talk (copy before augmenting)
    # (augmentation code elided on the slide)
    return img, label
end
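A hedged sketch of building the `(path, label)` list from a directory-per-class layout (the `root` layout and names are my assumptions):

# Hedged sketch: one subdirectory per class, files inside are samples.
root = "dataset"
classdirs = readdir(root)
data = Tuple{String,Int64}[]
for (label, cls) in enumerate(classdirs)
    for file in readdir(joinpath(root, cls))
        push!(data, (joinpath(root, cls, file), label))
    end
end
train_dataset = Dataset(data, train=true)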
# Slide 31: a Chainer-style batch iterator over a Dataset
using Random

struct SerialIterator
    len::Int
    get_example::Function
    batchsize::Int
    indices::Vector
    function SerialIterator(dataset::Dataset, batchsize::Int; shuffle=true)
        indices = Vector(1:dataset.len)
        if shuffle
            shuffle!(indices)
        end
        _get_example = i -> get_example(dataset, i)
        new(dataset.len, _get_example, batchsize, indices)
    end
end

function Base.iterate(diter::SerialIterator, state=(1, 0))
    idx_start, count = state
    if idx_start + diter.batchsize - 1 > diter.len
        return nothing  # drop the final partial batch
    else
        indices = diter.indices[idx_start:idx_start + diter.batchsize - 1]
        element = diter.get_example.(indices)  # vector of (img, label) tuples
        return (element, (idx_start + diter.batchsize, count + 1))
    end
end
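A hedged usage sketch, collating each batch of `(image, label)` tuples into the WHCN array and one-hot matrix that Flux layers expect (the collation, reusing `getarray` from the CIFAR slide, is my assumption):

using Flux: onehotbatch

# Hedged sketch: iterate over shuffled batches and collate them.
diter = SerialIterator(train_dataset, 16, shuffle=true)
for batch in diter
    xs = cat([getarray(img) for (img, _) in batch]..., dims=4)
    ys = onehotbatch([label for (_, label) in batch], 1:length(classdirs))
    # ... feed (xs, ys) to the loss / train step
end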
# Slides 32-33: a tongue-in-cheek pseudo-model (the layer names are a
# Japanese joke: muri = "impossible", ganbareba kanoudesu = "possible
# if you try hard"); they stand in for layers that do not exist yet
Chain(Flux.muri,
      Flux.ganbareba,
      Flux.kanoudesu,
      softmax)
# Slide 34: wrapping the pseudo-layers in a residual-style custom block;
# the same pattern is used for ExpandedConv below
struct Zikiso
    layers::Chain
    activation_fn
end

# callable: run the chain, add the skip connection, then activate
function (ec::Zikiso)(x)
    h = ec.layers(x)
    ec.activation_fn.(h + x)
end

function Zikiso()
    chain = Chain(Flux.muri,
                  Flux.ganbareba,
                  Flux.kanoudesu)
    Zikiso(chain, Flux.softmax)
end

@treelike Zikiso
# Slide 35: MobileNetV2's inverted-residual block (expand -> depthwise -> project)
using Flux
using Flux: Chain, Conv, BatchNorm, DepthwiseConv
using Flux: @treelike

# ReLU6, written to stay type-stable and differentiable
relu6(x) = min(max(zero(x), x), eltype(x)(6))

struct ExpandedConv
    layers::Chain
    stride::Int
end

function ExpandedConv(expand::Int, ch::Pair{<:Integer,<:Integer}; stride=1)
    inch = ch[1]
    outch = ch[2]
    expandedch = inch * expand
    if expand != 1
        chain = Chain(Conv((1,1), inch=>expandedch),   # 1x1 expansion
                      BatchNorm(expandedch, relu6),
                      DepthwiseConv((3,3), expandedch, relu6, stride=stride, pad=1),
                      BatchNorm(expandedch, relu6),
                      Conv((1,1), expandedch=>outch),  # 1x1 projection
                      BatchNorm(outch))                # linear bottleneck: no activation
    else
        chain = Chain(DepthwiseConv((3,3), expandedch, relu6, stride=stride, pad=1),
                      BatchNorm(expandedch, relu6),
                      Conv((1,1), expandedch=>outch),
                      BatchNorm(outch))
    end
    ExpandedConv(chain, stride)
end

@treelike ExpandedConv

# add the residual connection only when input and output shapes match
function (ec::ExpandedConv)(x)
    h = ec.layers(x)
    if size(h) == size(x)
        relu6.(h + x)
    else
        relu6.(h)
    end
end
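A hedged smoke test of the block (the input sizes are my assumptions; WHCN layout):

# Hedged sketch: a stride-2 block halves the spatial size and changes the
# channel count, so no residual; a stride-1 block with matching channels
# keeps the shape and adds the skip connection.
ec1 = ExpandedConv(6, 16=>24, stride=2)
@show size(ec1(rand(Float32, 112, 112, 16, 1)))  # expect (56, 56, 24, 1)
ec2 = ExpandedConv(6, 32=>32)
@show size(ec2(rand(Float32, 28, 28, 32, 1)))    # expect (28, 28, 32, 1)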
# Slide 36: full MobileNetV2 built from ExpandedConv blocks, with the head
# shrunk to 120 channels and a 101-class classifier
struct MobileNetv2
    layers::Chain
end

mv2() = Chain(Conv((3,3), 3=>32, stride=2, pad=1),
              BatchNorm(32, relu6),
              ExpandedConv(1, 32=>16),
              ExpandedConv(6, 16=>24, stride=2),
              ExpandedConv(6, 24=>24),
              ExpandedConv(6, 24=>32, stride=2),
              ExpandedConv(6, 32=>32),
              ExpandedConv(6, 32=>32),
              ExpandedConv(6, 32=>64, stride=2),
              ExpandedConv(6, 64=>64),
              ExpandedConv(6, 64=>64),
              ExpandedConv(6, 64=>64),
              ExpandedConv(6, 64=>96),
              ExpandedConv(6, 96=>96),
              ExpandedConv(6, 96=>96),
              ExpandedConv(6, 96=>160, stride=2),
              ExpandedConv(6, 160=>160),
              ExpandedConv(6, 160=>160),
              ExpandedConv(6, 160=>320),
              Conv((1,1), 320=>120),
              BatchNorm(120, relu6),
              MeanPool((7,7)),
              x -> reshape(x, :, size(x, 4)),  # flatten to (features, batch)
              Dense(120, 101),
              softmax)

MobileNetv2() = MobileNetv2(mv2())
@treelike MobileNetv2
(mv2::MobileNetv2)(x) = mv2.layers(x)
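A hedged end-to-end check on a dummy 224x224 RGB batch (the input size is my assumption; it is what makes the final 7x7 MeanPool line up, since five stride-2 stages take 224 down to 7):

# Hedged sketch: 224x224 input -> 7x7 feature map before MeanPool -> 101 classes.
net = MobileNetv2()
y = net(rand(Float32, 224, 224, 3, 2))
@show size(y)  # expect (101, 2)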
# Slide 41: a Julia gotcha when coming from Python. A number is its own
# one-element iterator, so the first loop prints "10" exactly once; to
# count from 1 to some_number you need an explicit range.
some_number = 10

for i in some_number
    println(i)  # prints 10 (once)
end

for i in 1:some_number
    println(i)  # prints 1, 2, ..., 10
end
