-
Notifications
You must be signed in to change notification settings - Fork 3
Expand file tree
/
Copy pathhyperparameter_optimization.py
More file actions
161 lines (151 loc) · 7.71 KB
/
hyperparameter_optimization.py
File metadata and controls
161 lines (151 loc) · 7.71 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
import optuna
import pandas as pd
import plotly
from evaluate import load_ontology, evaluate_embeddings
from on2vec.training import train_ontology_embeddings, train_text_augmented_ontology_embeddings
from on2vec.embedding import embed_ontology_with_model
from on2vec.evaluation import evaluate_embeddings as on2vec_evaluate_embeddings
from on2vec.evaluation import EmbeddingEvaluator
def train_ontology_embeddings_wrapper(owl_file,
                                      model_output,
                                      model_type,
                                      hidden_dim,
                                      out_dim,
                                      epochs,
                                      loss_fn_name,
                                      learning_rate,
                                      use_multi_relation,
                                      dropout,
                                      num_bases=None,
                                      include_text_features=False):
    """Train ontology embeddings, optionally with text-augmented features.

    Thin dispatch wrapper: routes to the text-augmented trainer when
    ``include_text_features`` is True, otherwise to the plain graph trainer.

    Args:
        owl_file (str): Path to the OWL ontology file.
        model_output (str): Path where the trained model is saved.
        model_type (str): GNN backbone type (e.g. 'gcn', 'gat', 'heterogeneous').
        hidden_dim (int): Hidden layer dimension.
        out_dim (int): Output embedding dimension.
        epochs (int): Number of training epochs.
        loss_fn_name (str): Name of the loss function.
        learning_rate (float): Optimizer learning rate.
        use_multi_relation (bool): Multi-relation graph building (plain trainer only).
        dropout (float): Dropout rate.
        num_bases (int, optional): RGCN basis decomposition count (plain trainer only).
        include_text_features (bool): Use the text-augmented training pipeline.

    Returns:
        The trained model returned by the underlying training function.
    """
    if include_text_features:
        # NOTE(review): use_multi_relation and num_bases are not forwarded
        # here — this call does not pass them to the text-augmented trainer.
        return train_text_augmented_ontology_embeddings(
            owl_file=owl_file,
            model_output=model_output,
            backbone_model=model_type,
            hidden_dim=hidden_dim,
            out_dim=out_dim,
            epochs=epochs,
            loss_fn_name=loss_fn_name,
            learning_rate=learning_rate,
            dropout=dropout,
        )
    return train_ontology_embeddings(
        owl_file=owl_file,
        model_output=model_output,
        model_type=model_type,
        hidden_dim=hidden_dim,
        out_dim=out_dim,
        epochs=epochs,
        loss_fn_name=loss_fn_name,
        learning_rate=learning_rate,
        use_multi_relation=use_multi_relation,
        dropout=dropout,
        num_bases=num_bases,
    )
def define_objective(trial, owl_file, model_output, epochs, use_multi_relation=False,
                     dropout=0.0, num_bases=None, parquet_file='embeddings.parquet',
                     include_text_features=False, relationship=None,
                     evaluator='on2vec_eval'):
    """Optuna objective: train, embed, and evaluate one hyperparameter trial.

    Suggests hidden_dim, out_dim, learning_rate, model_type and loss_fn_name,
    trains a model via :func:`train_ontology_embeddings_wrapper`, writes the
    resulting embeddings to ``parquet_file``, then evaluates them.

    Args:
        trial: The Optuna trial supplying hyperparameter suggestions.
        owl_file (str): Path to the OWL ontology file.
        model_output (str): Path to save the trained model.
        epochs (int): Number of training epochs.
        use_multi_relation (bool): Multi-relation graph building.
        dropout (float): Dropout rate.
        num_bases (int, optional): RGCN basis decomposition count.
        parquet_file (str): Output path for the embeddings parquet file.
        include_text_features (bool): Use the text-augmented training pipeline.
        relationship (list[str], optional): Relationships to evaluate against;
            defaults to ['rdfs:subClassOf'].
        evaluator (str): 'on2vec_eval' for the built-in extrinsic evaluator,
            anything else for the custom link-based evaluation.

    Returns:
        tuple[float, float]:
            - evaluator == 'on2vec_eval': (mlp roc_auc, logistic-regression roc_auc)
            - otherwise: (roc_auc, mean_rank)
        NOTE(review): the study is created with directions
        ["maximize", "minimize"]; the on2vec_eval branch returns two ROC-AUC
        values that should presumably both be maximized — confirm directions.
    """
    # Avoid a shared mutable default argument.
    if relationship is None:
        relationship = ['rdfs:subClassOf']

    # Hyperparameter search space.
    hidden_dim = trial.suggest_int("hidden_dim", 4, 256)
    out_dim = trial.suggest_int("out_dim", 4, 256)
    learning_rate = trial.suggest_float("learning_rate", 1e-5, 1e-1, log=True)
    model_type = trial.suggest_categorical("model_type", ['gcn', 'gat', 'heterogeneous'])
    loss_fn_name = trial.suggest_categorical("loss_fn_name", ['cosine', 'cross_entropy'])

    # Train a model with the suggested hyperparameters.
    train_ontology_embeddings_wrapper(
        owl_file=owl_file,
        model_output=model_output,
        model_type=model_type,
        hidden_dim=hidden_dim,
        out_dim=out_dim,
        epochs=epochs,
        loss_fn_name=loss_fn_name,
        learning_rate=learning_rate,
        use_multi_relation=use_multi_relation,
        dropout=dropout,
        num_bases=num_bases,
        include_text_features=include_text_features,
    )

    # Produce embeddings for the ontology with the freshly trained model.
    embed_ontology_with_model(owl_file=owl_file,
                              model_path=model_output,
                              output_file=parquet_file)

    if evaluator == 'on2vec_eval':
        # Extrinsic evaluation: classifier ROC-AUC on the embeddings.
        vals = EmbeddingEvaluator(parquet_file, owl_file).evaluate_extrinsic(
            classification_tasks=['logistic_regression', 'mlp'],
            link_prediction=False)
        return (vals['classifiers']['mlp']['roc_auc'],
                vals['classifiers']['logistic_regression']['roc_auc'])

    # Custom evaluation over the requested relationship(s).
    # (The ontology is loaded only here — the previous version also loaded it
    # at the top of the function and never used that copy.)
    ontology = load_ontology(owl_file)
    embeddings_df = pd.read_parquet(parquet_file)
    metrics = evaluate_embeddings(ontology, embeddings_df, relationship=relationship)
    return metrics["roc_auc"], metrics["mean_rank"]
# Evaluate model
# Multi-objective study: objective 0 (roc_auc) is maximized,
# objective 1 (mean_rank) is minimized.
study = optuna.create_study(directions=["maximize", "minimize"])
study.optimize(
    lambda trial: define_objective(
        trial,
        owl_file='test.owl',
        model_output='model.pth',
        epochs=10,
        use_multi_relation=True,
        dropout=0.0,
        num_bases=5,
        parquet_file='embeddings.parquet',
        include_text_features=False,
        relationship=['interacts_with'],
        # Any value other than 'on2vec_eval' selects the custom evaluator,
        # which returns (roc_auc, mean_rank).
        evaluator='bruh',
    ),
    n_trials=100,
)

# Pareto front of the two objectives.
paretoplot = optuna.visualization.plot_pareto_front(
    study, target_names=["roc_auc", "mean_rank"])
paretoplot.write_html("pareto_front.html")
print(f"Number of trials on the Pareto front: {len(study.best_trials)}")

# Trial with the highest roc_auc (objective index 0).
# BUG FIX: this previously keyed on t.values[1] (mean_rank, the *minimized*
# objective), so max() picked the trial with the worst mean rank rather than
# the best accuracy.
trial_with_highest_accuracy = max(study.best_trials, key=lambda t: t.values[0])
print("Trial with highest accuracy: ")
print(f"\tnumber: {trial_with_highest_accuracy.number}")
print(f"\tparams: {trial_with_highest_accuracy.params}")
print(f"\tvalues: {trial_with_highest_accuracy.values}")

# Hyperparameter importance with respect to roc_auc.
hyperparameterimportanceroc = optuna.visualization.plot_param_importances(
    study, target=lambda t: t.values[0], target_name="roc_auc"
)
hyperparameterimportanceroc.write_html("hyperparameter_importance_roc.html")

# Hyperparameter importance with respect to mean_rank.
# (If re-enabled, mean_rank is objective index 1 — the original commented
# version targeted values[0], which is roc_auc.)
# hyperparameterimportancemeanrank = optuna.visualization.plot_param_importances(
#     study, target=lambda t: t.values[1], target_name="mean_rank"
# )