Skip to content

Commit

Permalink
Merge pull request #139 from ilabcode/dev
Browse files Browse the repository at this point in the history
Dev
  • Loading branch information
PTWaade authored Oct 3, 2024
2 parents 1075d01 + f53d111 commit 9ba4366
Show file tree
Hide file tree
Showing 12 changed files with 379 additions and 260 deletions.
2 changes: 1 addition & 1 deletion Project.toml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
name = "ActionModels"
uuid = "320cf53b-cc3b-4b34-9a10-0ecb113566a3"
authors = ["Peter Thestrup Waade ptw@cas.au.dk", "Anna Hedvig Møller hedvig.2808@gmail.com", "Jacopo Comoglio jacopo.comoglio@gmail.com", "Christoph Mathys chmathys@cas.au.dk"]
version = "0.6.2"
version = "0.6.3"

[deps]
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
Expand Down
11 changes: 6 additions & 5 deletions src/ActionModels.jl
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ using ProgressMeter, Distributed #TODO: get rid of this (only needed for paramet
#Export functions
export Agent, RejectParameters, InitialStateParameter, ParameterGroup
export init_agent, premade_agent, warn_premade_defaults, multiple_actions, check_agent
export simple_statistical_model,
export independent_agents_population_model,
create_model, fit_model, parameter_recovery, single_recovery
export plot_parameter_distribution,
plot_predictive_simulation, plot_trajectory, plot_trajectory!
Expand Down Expand Up @@ -44,13 +44,14 @@ include("create_agent/create_premade_agent.jl")
include("create_agent/multiple_actions.jl")
include("create_agent/check_agent.jl")
#Functions for fitting agents to data
include("fitting/helper_functions.jl")
include("fitting/extract_quantities.jl")
include("fitting/create_model.jl")
include("fitting/simple_statistical_model.jl")
include("fitting/single_agent_statistical_model.jl")
include("fitting/agent_model.jl")
include("fitting/population_models/independent_agents_population_model.jl")
include("fitting/population_models/single_agent_population_model.jl")
include("fitting/fit_model.jl")
include("fitting/parameter_recovery.jl")
include("fitting/helper_functions.jl")
include("fitting/extract_quantities.jl")
#include("fitting/prefit_checks.jl")

#Plotting functions for agents
Expand Down
220 changes: 220 additions & 0 deletions src/fitting/agent_model.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,220 @@
###############################################
### WITH SINGLE ACTION / NO MISSING ACTIONS ###
###############################################
###############################################
### WITH SINGLE ACTION / NO MISSING ACTIONS ###
###############################################
@model function agent_models(agent::Agent, agent_ids::Vector{Symbol}, parameters_per_agent::Vector{D}, inputs_per_agent::Vector{I}, actions_per_agent::Vector{Vector{R}}, actions_flattened::Vector{R}, missing_actions::Nothing) where {D<:Dict, I<:Vector, R<:Real}

    #TODO: Could use a list comprehension here to make it more efficient
    #Preallocate storage for one action distribution per flattened action
    distributions = Vector(undef, length(actions_flattened))

    #Running position in the flattened action vector
    dist_idx = 0

    #Simulate each agent in turn, reusing the single shared agent object
    for (agent_params, agent_inputs, agent_actions) in zip(parameters_per_agent, inputs_per_agent, actions_per_agent)

        #Configure the agent for this dataset and clear its states
        set_parameters!(agent, agent_params)
        reset!(agent)

        #Step through this agent's timeseries
        for (input, action) in zip(agent_inputs, agent_actions)

            dist_idx += 1

            #Query the action model for this timestep's action distribution
            @inbounds distributions[dist_idx] = agent.action_model(agent, input)

            #Feed the observed action back into the agent's states
            update_states!(agent, "action", action)
        end
    end

    #Re-collect so the vector gets a concrete element type
    distributions = [dist for dist in distributions]

    #Condition on all observed actions jointly
    actions_flattened ~ arraydist(distributions)
end


##################################################
### WITH MULTIPLE ACTIONS / NO MISSING ACTIONS ###
##################################################
##################################################
### WITH MULTIPLE ACTIONS / NO MISSING ACTIONS ###
##################################################
@model function agent_models(agent::Agent, agent_ids::Vector{Symbol}, parameters_per_agent::Vector{D}, inputs_per_agent::Vector{I}, actions_per_agent::Vector{Matrix{R}}, actions_flattened::Matrix{R}, missing_actions::Nothing) where {D<:Dict, I<:Vector, R<:Real}

    #Preallocate one distribution slot per action in the flattened action matrix
    distributions = Matrix(undef, size(actions_flattened)...)

    #Running row position in the flattened action matrix
    row_idx = 0

    #Simulate each agent in turn, reusing the single shared agent object
    for (agent_params, agent_inputs, agent_actions) in zip(parameters_per_agent, inputs_per_agent, actions_per_agent)

        #Configure the agent for this dataset and clear its states
        set_parameters!(agent, agent_params)
        reset!(agent)

        #Step through the timeseries; each row of the action matrix is one timestep
        for (input, action) in zip(agent_inputs, Tuple.(eachrow(agent_actions)))

            row_idx += 1

            #The action model returns one distribution per action dimension
            @inbounds distributions[row_idx, :] = agent.action_model(agent, input)

            #Feed the observed actions back into the agent's states
            update_states!(agent, "action", action)
        end
    end

    #Re-collect so the matrix gets a concrete element type
    distributions = [dist for dist in distributions]

    #Condition on all observed actions jointly
    actions_flattened ~ arraydist(distributions)
end









############################################
### WITH MISSING ACTIONS - SUPERFUNCTION ###
############################################
#Dispatch for datasets containing missing actions: rather than conditioning on one
#joint arraydist, each agent is fitted as a separate submodel so that missing
#actions can be sampled (imputed) per timestep in the submodels below.
@model function agent_models(agent::Agent, agent_ids::Vector{Symbol}, parameters_per_agent::Vector{D}, inputs_per_agent::Vector{I}, actions_per_agent::Vector{A}, actions_flattened::A, missing_actions::MissingActions) where {D<:Dict, I<:Vector, A<:Array}

    #For each agent
    for (agent_id, agent_parameters, agent_inputs, agent_actions) in zip(agent_ids, parameters_per_agent, inputs_per_agent, actions_per_agent)

        #Fit it to the data
        #The agent id is used as a prefix so each agent's random variables get unique names
        @submodel prefix = "$agent_id" agent_model(agent, agent_parameters, agent_inputs, agent_actions)
    end
end

#################################################
### WITH SINGLE ACTION / WITH MISSING ACTIONS ###
#################################################
#Fits a single agent whose action vector may contain missing values. Each action is
#sampled individually with its own `~` statement so that Turing can impute the
#missing entries; this is slower than the joint arraydist used when no actions are missing.
@model function agent_model(agent::Agent, parameters::D, inputs::I, actions::Vector{Union{Missing, R}}) where {D<:Dict, I<:Vector, R<:Real}

    #Set the agent parameters
    set_parameters!(agent, parameters)
    reset!(agent)

    #Go through each timestep
    for (timestep, input) in enumerate(inputs)

        #Get the action probability distributions from the action model
        action_distribution = agent.action_model(agent, input)

        #Sample the action from the probability distribution
        #(observed when the entry is present, imputed when it is missing)
        @inbounds actions[timestep] ~ action_distribution

        #Save the action to the agent in case it needs it in the future
        #NOTE(review): ad_val presumably extracts the plain value from an AD-tracked
        #number before storing it in the agent's states — confirm against its definition
        @inbounds update_states!(
            agent,
            "action",
            ad_val(actions[timestep]),
        )
    end
end

####################################################
### WITH MULTIPLE ACTIONS / WITH MISSING ACTIONS ###
####################################################
#Fits a single agent with multiple simultaneous actions per timestep, where the
#action matrix may contain missing values. Each action dimension is sampled with its
#own `~` statement so that Turing can impute the missing entries.
@model function agent_model(agent::Agent, parameters::D, inputs::I, actions::Matrix{Union{Missing, R}}) where {D<:Dict, I<:Vector, R<:Real}

    #Set the agent parameters
    set_parameters!(agent, parameters)
    reset!(agent)

    #Go through each timestep
    for (timestep, input) in enumerate(inputs)

        #Get the action probability distributions from the action model
        #(assumed to return one distribution per action dimension — see TODO below)
        action_distributions = agent.action_model(agent, input)

        #Go through each action
        for (action_idx, single_distribution) in enumerate(action_distributions)

            #Sample the action from the probability distribution
            #(observed when present, imputed when missing)
            actions[timestep, action_idx] ~
                single_distribution
            #TODO: can use @inbounds here when there's a check for whether the right amount of actions are produced
        end

        #Add the actions to the agent in case it needs it in the future
        #NOTE(review): ad_val presumably strips AD tracking from each sampled value — confirm
        update_states!(
            agent,
            "action",
            ad_val.(actions[timestep, :]),
        )
        #TODO: can use @inbounds here when there's a check for whether the right amount of actions are produced
    end
end

###############################################
### WITH SINGLE ACTION / NO MISSING ACTIONS ###
###############################################
@model function agent_model(agent::Agent, parameters::D, inputs::I, actions::Vector{R}) where {D<:Dict, I<:Vector, R<:Real}

    #Configure the agent with this dataset's parameters and clear its states
    set_parameters!(agent, parameters)
    reset!(agent)

    #Preallocate one distribution slot per timestep
    distributions = Vector(undef, length(inputs))

    #Step through the timeseries
    for (t, (input, action)) in enumerate(zip(inputs, actions))

        #Query the action model for this timestep's action distribution
        @inbounds distributions[t] = agent.action_model(agent, input)

        #Feed the observed action back into the agent's states
        update_states!(agent, "action", action)
    end

    #Re-collect so the vector gets a concrete element type
    distributions = [dist for dist in distributions]

    #Condition on all observed actions jointly
    actions ~ arraydist(distributions)
end

##################################################
### WITH MULTIPLE ACTIONS / NO MISSING ACTIONS ###
##################################################

@model function agent_model(agent::Agent, parameters::D, inputs::I, actions::Matrix{R}) where {D<:Dict, I<:Vector, R<:Real}

    #Configure the agent with this dataset's parameters and clear its states
    set_parameters!(agent, parameters)
    reset!(agent)

    #Preallocate one distribution slot per action in the action matrix
    distributions = Matrix(undef, size(actions)...)

    #Step through the timeseries; each row of the action matrix is one timestep
    for (t, (input, action)) in enumerate(zip(inputs, Tuple.(eachrow(actions))))

        #The action model returns one distribution per action dimension
        distributions[t, :] = agent.action_model(agent, input) #TODO: can use @inbounds here when there's a check for whether the right amount of actions are used

        #Feed the observed actions back into the agent's states
        update_states!(agent, "action", action)
    end

    #Re-collect so the matrix gets a concrete element type
    distributions = [dist for dist in distributions]

    #Condition on all observed actions jointly
    actions ~ arraydist(distributions)
end
Loading

2 comments on commit 9ba4366

@PTWaade
Copy link
Collaborator Author

@PTWaade PTWaade commented on 9ba4366 Oct 3, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@JuliaRegistrator register

# Minor changes
Optimized the Turing model

@JuliaRegistrator
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Registration pull request created: JuliaRegistries/General/116503

Tip: Release Notes

Did you know you can add release notes too? Just add markdown formatted text underneath the comment after the text
"Release notes:" and it will be added to the registry PR, and if TagBot is installed it will also be added to the
release that TagBot creates. i.e.

@JuliaRegistrator register

Release notes:

## Breaking changes

- blah

To add them here just re-invoke and the PR will be updated.

Tagging

After the above pull request is merged, it is recommended that a tag is created on this repository for the registered package version.

This will be done automatically if the Julia TagBot GitHub Action is installed, or can be done manually through the github interface, or via:

git tag -a v0.6.3 -m "<description of version>" 9ba4366d97ccf4a1c099bc131b7d8d6545a88130
git push origin v0.6.3

Please sign in to comment.