diff --git a/ensembles/cruel_summer/main.py b/ensembles/cruel_summer/main.py
index 0239b137..8c7d3671 100644
--- a/ensembles/cruel_summer/main.py
+++ b/ensembles/cruel_summer/main.py
@@ -14,7 +14,15 @@ from execute_model_runs import execute_single_run
 
 warnings.filterwarnings("ignore")
 
-
+try:
+    from common_utils.ensemble_path import EnsemblePath
+    from common_utils.global_cache import GlobalCache
+    model_name = EnsemblePath.get_model_name_from_path(PATH)
+    GlobalCache["current_model"] = model_name
+except ImportError as e:
+    warnings.warn(f"ImportError: {e}. Some functionalities (model separated log files) may not work properly.", ImportWarning)
+except Exception as e:
+    warnings.warn(f"An unexpected error occurred: {e}.", RuntimeWarning)
 
 logger = setup_logging("run.log")
 
diff --git a/ensembles/white_mustang/main.py b/ensembles/white_mustang/main.py
index 0239b137..8c7d3671 100644
--- a/ensembles/white_mustang/main.py
+++ b/ensembles/white_mustang/main.py
@@ -14,7 +14,15 @@ from execute_model_runs import execute_single_run
 
 warnings.filterwarnings("ignore")
 
-
+try:
+    from common_utils.ensemble_path import EnsemblePath
+    from common_utils.global_cache import GlobalCache
+    model_name = EnsemblePath.get_model_name_from_path(PATH)
+    GlobalCache["current_model"] = model_name
+except ImportError as e:
+    warnings.warn(f"ImportError: {e}. Some functionalities (model separated log files) may not work properly.", ImportWarning)
+except Exception as e:
+    warnings.warn(f"An unexpected error occurred: {e}.", RuntimeWarning)
 
 logger = setup_logging("run.log")
 
diff --git a/meta_tools/templates/ensemble/template_main.py b/meta_tools/templates/ensemble/template_main.py
index fa175ea0..663fe6c2 100644
--- a/meta_tools/templates/ensemble/template_main.py
+++ b/meta_tools/templates/ensemble/template_main.py
@@ -4,34 +4,17 @@ def generate(script_dir: Path) -> bool:
     """
-    Generates a Python script that sets up and executes model runs with Weights & Biases (WandB) integration.
-
-    This function creates a script that imports necessary modules, sets up project paths, and defines the
-    main execution logic for running either a single model run or a sweep of model configurations. The
-    generated script includes command-line argument parsing, validation, and runtime logging.
+    Generates a script that sets up the project paths, parses command-line arguments,
+    sets up logging, and executes a single model run.
 
     Parameters:
         script_dir (Path):
-            The directory where the generated Python script will be saved. This should be a valid writable
-            path that exists within the project structure.
+            The directory where the generated script will be saved.
+            This should be a valid writable path.
 
     Returns:
         bool:
-            True if the script was successfully written to the specified directory, False otherwise.
-
-    The generated script includes the following features:
-    - Imports required libraries and sets up the path to include the `common_utils` module.
-    - Initializes project paths using the `setup_project_paths` function.
-    - Parses command-line arguments with `parse_args`.
-    - Validates arguments to ensure correctness with `validate_arguments`.
-    - Logs into Weights & Biases using `wandb.login()`.
-    - Executes a model run based on the provided command-line flags, either initiating a sweep or a single run.
-    - Calculates and prints the runtime of the execution in minutes.
-
-    Note:
-    - Ensure that the `common_utils` module and all other imported modules are accessible from the
-      specified script directory.
-    - The generated script is designed to be executed as a standalone Python script.
+            True if the script was written and compiled successfully, False otherwise.
     """
     code = """import wandb
 import sys
 import warnings
 from pathlib import Path
@@ -49,8 +32,16 @@ def generate(script_dir: Path) -> bool:
 from execute_model_runs import execute_single_run
 
 warnings.filterwarnings("ignore")
 
-
-logger = setup_logging('run.log')
+try:
+    from common_utils.ensemble_path import EnsemblePath
+    from common_utils.global_cache import GlobalCache
+    model_name = EnsemblePath.get_model_name_from_path(PATH)
+    GlobalCache["current_model"] = model_name
+except ImportError as e:
+    warnings.warn(f"ImportError: {e}. Some functionalities (model separated log files) may not work properly.", ImportWarning)
+except Exception as e:
+    warnings.warn(f"An unexpected error occurred: {e}.", RuntimeWarning)
+logger = setup_logging("run.log")
 
 
 if __name__ == "__main__":
diff --git a/meta_tools/templates/model/template_config_meta.py b/meta_tools/templates/model/template_config_meta.py
index 8978b021..646a46ca 100644
--- a/meta_tools/templates/model/template_config_meta.py
+++ b/meta_tools/templates/model/template_config_meta.py
@@ -31,10 +31,10 @@ def generate(script_dir: Path, model_name: str, model_algorithm: str) -> bool:
     \"""
     meta_config = {{
-        "name": "{model_name}", # Eg. happy_kitten
-        "algorithm": "{model_algorithm}," # Eg. "LSTM", "CNN", "Transformer"
+        "name": "{model_name}",
+        "algorithm": "{model_algorithm}",
         # Uncomment and modify the following lines as needed for additional metadata:
-        # "target(S)": ["ln_sb_best", "ln_ns_best", "ln_os_best", "ln_sb_best_binarized", "ln_ns_best_binarized", "ln_os_best_binarized"],
+        # "depvar": "ln_ged_sb_dep",
         # "queryset": "escwa001_cflong",
         # "level": "pgm",
         # "creator": "Your name here"
diff --git a/meta_tools/templates/model/template_evaluate_model.py b/meta_tools/templates/model/template_evaluate_model.py
index e0ce4a3d..6f9dcfab 100644
--- a/meta_tools/templates/model/template_evaluate_model.py
+++ b/meta_tools/templates/model/template_evaluate_model.py
@@ -22,7 +22,7 @@ def generate(script_dir: Path) -> bool:
 import logging
 from model_path import ModelPath
 from utils_log_files import create_log_file, read_log_file
-from utils_outputs import save_model_outputs, save_predictions
+from utils_save_outputs import save_model_outputs, save_predictions
 from utils_run import get_standardized_df
 from utils_artifacts import get_latest_model_artifact
 from utils_evaluation_metrics import generate_metric_dict
diff --git a/meta_tools/templates/model/template_evaluate_sweep.py b/meta_tools/templates/model/template_evaluate_sweep.py
index 805181a8..03cf0e74 100644
--- a/meta_tools/templates/model/template_evaluate_sweep.py
+++ b/meta_tools/templates/model/template_evaluate_sweep.py
@@ -32,12 +32,12 @@ def evaluate_sweep(config, stepshift_model):
     run_type = config["run_type"]
     steps = config["steps"]
 
-    df_viewser = pd.read_pickle(path_raw / f"{{{{run_type}}}}_viewser_df.pkl")
+    df_viewser = pd.read_pickle(path_raw / f"{{run_type}}_viewser_df.pkl")
     df = stepshift_model.predict(run_type, df_viewser)
     df = get_standardized_df(df, config)
 
     # Temporarily keep this because the metric to minimize is MSE
-    pred_cols = [f"step_pred_{{{{str(i)}}}}" for i in steps]
+    pred_cols = [f"step_pred_{{str(i)}}" for i in steps]
     df["mse"] = df.apply(lambda row: mean_squared_error([row[config["depvar"]]] * 36, [row[col] for col in pred_cols]), axis=1)
diff --git a/meta_tools/templates/model/template_generate_forecast.py b/meta_tools/templates/model/template_generate_forecast.py
index bd49882e..6e25fd4c 100644
--- a/meta_tools/templates/model/template_generate_forecast.py
+++ b/meta_tools/templates/model/template_generate_forecast.py
@@ -22,7 +22,7 @@ def generate(script_dir: Path) -> bool:
 from model_path import ModelPath
 from utils_log_files import create_log_file, read_log_file
 from utils_run import get_standardized_df
-from utils_outputs import save_predictions
+from utils_save_outputs import save_predictions
 from utils_artifacts import get_latest_model_artifact
 
 logger = logging.getLogger(__name__)
@@ -38,28 +38,28 @@ def forecast_model_artifact(config, artifact_name):
     # if an artifact name is provided through the CLI, use it.
     # Otherwise, get the latest model artifact based on the run type
     if artifact_name:
-        logger.info(f"Using (non-default) artifact: {{{{artifact_name}}}}")
+        logger.info(f"Using (non-default) artifact: {{artifact_name}}")
 
         if not artifact_name.endswith(".pkl"):
             artifact_name += ".pkl"
         path_artifact = path_artifacts / artifact_name
     else:
         # use the latest model artifact based on the run type
-        logger.info(f"Using latest (default) run type ({{{{run_type}}}}) specific artifact")
+        logger.info(f"Using latest (default) run type ({{run_type}}) specific artifact")
         path_artifact = get_latest_model_artifact(path_artifacts, run_type)
 
     config["timestamp"] = path_artifact.stem[-15:]
-    df_viewser = pd.read_pickle(path_raw / f"{{{{run_type}}}}_viewser_df.pkl")
+    df_viewser = pd.read_pickle(path_raw / f"{{run_type}}_viewser_df.pkl")
 
     try:
         stepshift_model = pd.read_pickle(path_artifact)
     except FileNotFoundError:
-        logger.exception(f"Model artifact not found at {{{{path_artifact}}}}")
+        logger.exception(f"Model artifact not found at {{path_artifact}}")
 
     df_predictions = stepshift_model.predict(run_type, df_viewser)
     df_predictions = get_standardized_df(df_predictions, config)
     data_generation_timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-    date_fetch_timestamp = read_log_file(path_raw / f"{{{{run_type}}}}_data_fetch_log.txt").get("Data Fetch Timestamp", None)
+    date_fetch_timestamp = read_log_file(path_raw / f"{{run_type}}_data_fetch_log.txt").get("Data Fetch Timestamp", None)
 
     save_predictions(df_predictions, path_generated, config)
     create_log_file(path_generated, config, config["timestamp"], data_generation_timestamp, date_fetch_timestamp)
diff --git a/meta_tools/templates/model/template_main.py b/meta_tools/templates/model/template_main.py
index 4706a03a..9eeaa89b 100644
--- a/meta_tools/templates/model/template_main.py
+++ b/meta_tools/templates/model/template_main.py
@@ -52,15 +52,18 @@ def generate(script_dir: Path) -> bool:
 try:
     from common_utils.model_path import ModelPath
     from common_utils.global_cache import GlobalCache
-    GlobalCache["current_model"] = ModelPath.get_model_name_from_path(Path(__file__))
-except Exception:
-    pass
+    model_name = ModelPath.get_model_name_from_path(PATH)
+    GlobalCache["current_model"] = model_name
+except ImportError as e:
+    warnings.warn(f"ImportError: {e}. Some functionalities (model separated log files) may not work properly.", ImportWarning)
+except Exception as e:
+    warnings.warn(f"An unexpected error occurred: {e}.", RuntimeWarning)
 
 logger = setup_logging("run.log")
 
 
 if __name__ == "__main__":
     wandb.login()
-
+
     args = parse_args()
     validate_arguments(args)
@@ -69,4 +72,4 @@ def generate(script_dir: Path) -> bool:
     else:
         execute_single_run(args)
 """
-    return utils_script_gen.save_script(script_dir, code)
+    return utils_script_gen.save_script(script_dir, code)
\ No newline at end of file
diff --git a/meta_tools/templates/model/template_train_model.py b/meta_tools/templates/model/template_train_model.py
index 2be82037..6a6d739d 100644
--- a/meta_tools/templates/model/template_train_model.py
+++ b/meta_tools/templates/model/template_train_model.py
@@ -32,14 +32,14 @@ def train_model_artifact(config):
     path_generated = model_path.data_generated
     path_artifacts = model_path.artifacts
     run_type = config["run_type"]
-    df_viewser = pd.read_pickle(path_raw / f"{{{{run_type}}}}_viewser_df.pkl")
+    df_viewser = pd.read_pickle(path_raw / f"{{run_type}}_viewser_df.pkl")
 
     stepshift_model = stepshift_training(config, run_type, df_viewser)
     if not config["sweep"]:
         timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-        model_filename = f"{{{{run_type}}}}_model_{{{{timestamp}}}}.pkl"
+        model_filename = f"{{run_type}}_model_{{timestamp}}.pkl"
         stepshift_model.save(path_artifacts / model_filename)
-        date_fetch_timestamp = read_log_file(path_raw / f"{{{{run_type}}}}_data_fetch_log.txt").get("Data Fetch Timestamp", None)
+        date_fetch_timestamp = read_log_file(path_raw / f"{{run_type}}_data_fetch_log.txt").get("Data Fetch Timestamp", None)
         create_log_file(path_generated, config, timestamp, None, date_fetch_timestamp)
     return stepshift_model
diff --git a/meta_tools/templates/model/template_utils_run.py b/meta_tools/templates/model/template_utils_run.py
index 2b84ed64..59220060 100644
--- a/meta_tools/templates/model/template_utils_run.py
+++ b/meta_tools/templates/model/template_utils_run.py
@@ -27,7 +27,7 @@ def get_model(config, partitioner_dict):
     Get the model based on the algorithm specified in the config
     \"""
 
-    if config["algorithm"] == "HurdleRegression":
+    if config["algorithm"] == "HurdleModel":
         model = HurdleModel(config, partitioner_dict)
     else:
         config["model_reg"] = config["algorithm"]
@@ -49,7 +49,7 @@ def get_standardized_df(df, config):
     if run_type in ["calibration", "testing"]:
         cols = [depvar] + df.forecasts.prediction_columns
     elif run_type == "forecasting":
-        cols = [f"step_pred_{{{{i}}}}" for i in steps]
+        cols = [f"step_pred_{{i}}" for i in steps]
     df = df.replace([np.inf, -np.inf], 0)[cols]
     df = df.mask(df < 0, 0)
     return df
diff --git a/models/blank_space/main.py b/models/blank_space/main.py
index 06ea149b..a607c6db 100644
--- a/models/blank_space/main.py
+++ b/models/blank_space/main.py
@@ -15,7 +15,15 @@ from execute_model_runs import execute_sweep_run, execute_single_run
 
 warnings.filterwarnings("ignore")
 
-
+try:
+    from common_utils.model_path import ModelPath
+    from common_utils.global_cache import GlobalCache
+    model_name = ModelPath.get_model_name_from_path(PATH)
+    GlobalCache["current_model"] = model_name
+except ImportError as e:
+    warnings.warn(f"ImportError: {e}. Some functionalities (model separated log files) may not work properly.", ImportWarning)
+except Exception as e:
+    warnings.warn(f"An unexpected error occurred: {e}.", RuntimeWarning)
 
 logger = setup_logging("run.log")
 
diff --git a/models/electric_relaxation/main.py b/models/electric_relaxation/main.py
index bb88ebc7..8a7a396c 100644
--- a/models/electric_relaxation/main.py
+++ b/models/electric_relaxation/main.py
@@ -14,7 +14,15 @@ from execute_model_runs import execute_sweep_run, execute_single_run
 
 warnings.filterwarnings("ignore")
 
-
+try:
+    from common_utils.model_path import ModelPath
+    from common_utils.global_cache import GlobalCache
+    model_name = ModelPath.get_model_name_from_path(PATH)
+    GlobalCache["current_model"] = model_name
+except ImportError as e:
+    warnings.warn(f"ImportError: {e}. Some functionalities (model separated log files) may not work properly.", ImportWarning)
+except Exception as e:
+    warnings.warn(f"An unexpected error occurred: {e}.", RuntimeWarning)
 
 logger = setup_logging('run.log')
 
diff --git a/models/lavender_haze/main.py b/models/lavender_haze/main.py
index 360c51d1..335e3199 100644
--- a/models/lavender_haze/main.py
+++ b/models/lavender_haze/main.py
@@ -17,9 +17,12 @@
 try:
     from common_utils.model_path import ModelPath
     from common_utils.global_cache import GlobalCache
-    GlobalCache["current_model"] = ModelPath.get_model_name_from_path(Path(__file__))
-except Exception:
-    pass
+    model_name = ModelPath.get_model_name_from_path(PATH)
+    GlobalCache["current_model"] = model_name
+except ImportError as e:
+    warnings.warn(f"ImportError: {e}. Some functionalities (model separated log files) may not work properly.", ImportWarning)
+except Exception as e:
+    warnings.warn(f"An unexpected error occurred: {e}.", RuntimeWarning)
 
 logger = setup_logging("run.log")
 
diff --git a/models/old_money/main.py b/models/old_money/main.py
index 3176cba9..36429ecb 100644
--- a/models/old_money/main.py
+++ b/models/old_money/main.py
@@ -14,7 +14,15 @@ from execute_model_runs import execute_sweep_run, execute_single_run
 
 warnings.filterwarnings("ignore")
 
-
+try:
+    from common_utils.model_path import ModelPath
+    from common_utils.global_cache import GlobalCache
+    model_name = ModelPath.get_model_name_from_path(PATH)
+    GlobalCache["current_model"] = model_name
+except ImportError as e:
+    warnings.warn(f"ImportError: {e}. Some functionalities (model separated log files) may not work properly.", ImportWarning)
+except Exception as e:
+    warnings.warn(f"An unexpected error occurred: {e}.", RuntimeWarning)
 
 logger = setup_logging("run.log")
 
diff --git a/models/orange_pasta/main.py b/models/orange_pasta/main.py
index 3176cba9..36429ecb 100644
--- a/models/orange_pasta/main.py
+++ b/models/orange_pasta/main.py
@@ -14,7 +14,15 @@ from execute_model_runs import execute_sweep_run, execute_single_run
 
 warnings.filterwarnings("ignore")
 
-
+try:
+    from common_utils.model_path import ModelPath
+    from common_utils.global_cache import GlobalCache
+    model_name = ModelPath.get_model_name_from_path(PATH)
+    GlobalCache["current_model"] = model_name
+except ImportError as e:
+    warnings.warn(f"ImportError: {e}. Some functionalities (model separated log files) may not work properly.", ImportWarning)
+except Exception as e:
+    warnings.warn(f"An unexpected error occurred: {e}.", RuntimeWarning)
 
 logger = setup_logging("run.log")
 
diff --git a/models/wildest_dream/main.py b/models/wildest_dream/main.py
index 3176cba9..36429ecb 100644
--- a/models/wildest_dream/main.py
+++ b/models/wildest_dream/main.py
@@ -14,7 +14,15 @@ from execute_model_runs import execute_sweep_run, execute_single_run
 
 warnings.filterwarnings("ignore")
 
-
+try:
+    from common_utils.model_path import ModelPath
+    from common_utils.global_cache import GlobalCache
+    model_name = ModelPath.get_model_name_from_path(PATH)
+    GlobalCache["current_model"] = model_name
+except ImportError as e:
+    warnings.warn(f"ImportError: {e}. Some functionalities (model separated log files) may not work properly.", ImportWarning)
+except Exception as e:
+    warnings.warn(f"An unexpected error occurred: {e}.", RuntimeWarning)
 
 logger = setup_logging("run.log")
 
diff --git a/models/yellow_pikachu/main.py b/models/yellow_pikachu/main.py
index 3176cba9..36429ecb 100644
--- a/models/yellow_pikachu/main.py
+++ b/models/yellow_pikachu/main.py
@@ -14,7 +14,15 @@ from execute_model_runs import execute_sweep_run, execute_single_run
 
 warnings.filterwarnings("ignore")
 
-
+try:
+    from common_utils.model_path import ModelPath
+    from common_utils.global_cache import GlobalCache
+    model_name = ModelPath.get_model_name_from_path(PATH)
+    GlobalCache["current_model"] = model_name
+except ImportError as e:
+    warnings.warn(f"ImportError: {e}. Some functionalities (model separated log files) may not work properly.", ImportWarning)
+except Exception as e:
+    warnings.warn(f"An unexpected error occurred: {e}.", RuntimeWarning)
 
 logger = setup_logging("run.log")