# size_handling_adjoint.jl
# Source: https://github.com/JuliaDiffEq/DiffEqFlux.jl
# Revision 9af1cd7 ("Merge pull request #220 from ranjanan/RA/quaddirect"),
# authored by Christopher Rackauckas, 08 April 2020
using OrdinaryDiffEq, Test

# Parameters stored as a 2×2 matrix rather than a flat vector, to test that
# adjoint sensitivities handle non-vector parameter shapes.
p = [1.5 1.0; 3.0 1.0]

# In-place Lotka-Volterra system, indexing into the parameter matrix
function lotka_volterra(du, u, p, t)
  du[1] = p[1,1]*u[1] - p[1,2]*u[1]*u[2]
  du[2] = -p[2,1]*u[2] + p[2,2]*u[1]*u[2]
end

u0 = [1.0, 1.0]
tspan = (0.0, 10.0)

# Forward problem with the true parameters
prob = ODEProblem(lotka_volterra, u0, tspan, p)
sol = solve(prob, Tsit5())
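
# Sanity check (an illustrative addition, not in the original test):
# the forward solve with the true parameters should complete successfully.
@test sol.retcode == :Success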

#=
using Plots
plot(sol)
=#

import Flux
using DiffEqFlux
p = [2.2 1.0; 2.0 0.4] # Tweaked initial parameter array (to be trained back)
ps = Flux.params(p)    # Track the parameter matrix itself as a Flux parameter

function predict_adjoint() # Our 1-layer neural network: the ODE solve itself
  Array(concrete_solve(prob, Tsit5(), prob.u0, p, saveat = 0.0:0.1:10.0))
end

# Squared-error loss against the target steady state (1, 1)
loss_adjoint() = sum(abs2, x - 1 for x in predict_adjoint())
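# Equivalently, the same loss in vectorized form:
# loss_adjoint() = sum(abs2, predict_adjoint() .- 1)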

data = Iterators.repeated((), 100) # 100 training iterations, no minibatch data
opt = Flux.ADAM(0.1)
cb = function () # Callback to observe training progress
  display(loss_adjoint())
end

predict_adjoint() # Run the forward pass once to make sure it works

# Display the loss with the initial parameter values.
cb()
Flux.train!(loss_adjoint, ps, data, opt, cb = cb)

@test loss_adjoint() < 1
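
# A minimal sketch (an assumption, not part of the original test): the
# matrix-shaped gradient can also be inspected directly through the implicit
# parameter API that Flux.train! uses internally. The gradient with respect
# to `p` should have the same 2×2 shape as `p` itself.
gs = Flux.gradient(() -> loss_adjoint(), ps)
@test size(gs[p]) == size(p)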