Using KerasRegressor with GridSearchCV


I have run the following code to find the best hyperparameters for my neural network:

def prepare_dataset() -> tuple:
    df = pd.read_csv(r"H:\.userdata\Documents\Dataset\Total_Dataset_New+.csv",
                     encoding='latin', delimiter=';')

    from sklearn.model_selection import train_test_split

    x = df.drop(['Best_Focus', 'RotationX', 'RotationY', 'sample_id'], axis=1).values
    y = df['Best_Focus'].values
    #y = df[['Best_Focus', 'RotationX', 'RotationY']].values

    ### Standardization of data
    from sklearn.preprocessing import StandardScaler, MinMaxScaler

    PredictorScaler = MinMaxScaler()
    #TargetVarScaler = MinMaxScaler()

    # Storing the fit object for later reference
    PredictorScalerFit = PredictorScaler.fit(x)
    #TargetVarScalerFit = TargetVarScaler.fit(y)

    # Generating the standardized values of X and y
    x_transformed = PredictorScalerFit.transform(x)
    #y_transformed = TargetVarScalerFit.transform(y)

    # Split the data into training and testing set
    from sklearn.model_selection import train_test_split

    x_train, x_test, y_train, y_test = train_test_split(x_transformed, y, test_size=0.25, random_state=42)

    print(f"x_train shape: {x_train.shape}")
    print(f"y_train shape: {y_train.shape}")
    print(f"x_test shape: {x_test.shape}")
    print(f"y_test shape: {y_test.shape}")

    return (x_train, y_train), (x_test, y_test)

if __name__ == "__main__":

    num_features = 31
    num_targets = 1

    (x_train, y_train), (x_test, y_test) = prepare_dataset()

def build_model(num_features = 31, num_targets = 1) -> Sequential:
    
    init_w = TruncatedNormal(mean=0.0, stddev=0.01)
    init_b = Constant(value=0.0)

    model = Sequential()
    model.add(Dense(units=500, input_shape=(num_features,)))
    model.add(Activation("relu"))
    model.add(Dense(units=250))
    model.add(Activation("relu"))
    model.add(Dense(units=100))
    model.add(Activation("relu"))
    model.add(Dense(units=num_targets))
    model.add(Activation("tanh"))
    # model.summary()

    model.compile(loss="mean_squared_error", optimizer=Adam(learning_rate=0.0005), metrics="mae")
    saved_model = model.save(r"H:\.userdata\Documents\ML_Kurs_materialien2\UdemyTF-master\models")
    loaded_model = keras.models.load_model(r"H:\.userdata\Documents\ML_Kurs_materialien2\UdemyTF-master\models")
    
    return loaded_model


#model = build_model(num_features, num_targets)

#tb_callback = TensorBoard(log_dir=MODEL_LOG_DIR, histogram_freq=1, write_graph=True)
model = KerasRegressor(build_fn = build_model(), verbose=1)
'''kfold = KFold(n_splits=10, shuffle=True, random_state=70)
results = cross_val_score(model, x_train, y_train, cv=kfold)
print("Results: %.2f (%.2f) MSE" % (results.mean(), results.std()))

prediction = estimator.predict(X_test)
accuracy_score(Y_test, prediction)'''

batch_size = [10, 20, 40, 60, 80, 100]
epochs = [10, 50, 100]
#optimizer = ['SGD', 'RMSprop', 'Adagrad', 'Adadelta', 'Adam', 'Adamax', 'Nadam']

param_grid = {'epochs': epochs, 'batch_size': batch_size} #optimizer=optimizer, callbacks=callbacks)
grid = GridSearchCV(estimator= model,param_grid= param_grid) #scoring='r2', n_jobs=-1, cv=6)
grid_result = grid.fit(x_train, y_train)


print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, stdev, param))


model.fit(
    x=x_train,#.reshape(len(df),num_features,num_targets),
    y=y_train,
    epochs=200,
    batch_size=1000,
    verbose=0,
    validation_data=(x_test, y_test),
    callbacks=[tb_callback])



#model.save_weights(filepath=MODEL_FILE_PATH)
#model.load_weights(filepath=MODEL_FILE_PATH)

scores = model.evaluate(x=x_test, y=y_test, verbose=0)
print(f"Scores after saving: {scores}")

But I always get the following error:

INFO:tensorflow:Assets written to: H:\.userdata\Documents\ML_Kurs_materialien2\UdemyTF-master\models\assets
INFO:tensorflow:Assets written to: ram://cd0c50f5-ab30-4c1a-91fa-ca858c173b5f/assets

FileNotFoundError                         Traceback (most recent call last)
C:\Users\MDAROU~1\AppData\Local\Temp/ipykernel_20860/3959200035.py in <module>
    118     param_grid = {'epochs': epochs, 'batch_size': batch_size} #optimizer=optimizer, callbacks=callbacks)
    119     grid = GridSearchCV(estimator= model,param_grid= param_grid) #scoring='r2', n_jobs=-1, cv=6)
    120     grid_result = grid.fit(x_train, y_train)
    121
    122

FileNotFoundError: Unsuccessful TensorSliceReader constructor: Failed to find any matching files for ram://f6a45fc0-93be-4a46-b23b-b9535fa6af8f/variables/variables
You may be trying to load on a different device from the computational device. Consider setting the `experimental_io_device` option in `tf.saved_model.LoadOptions` to the io_device such as '/job:localhost'.

It seems to be a problem with KerasRegressor, but I have not been able to fix it so far.
I would be grateful if anyone who has faced the same problem could tell me what I should do!
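
For comparison, the pattern documented for the tf.keras scikit-learn wrapper is to pass the builder function itself (not the result of calling it) as build_fn, and to have the builder only build, compile and return the model, without any model.save()/load_model() inside it. The following is only a minimal sketch of that pattern, not a verified fix: it assumes the same tensorflow.keras.wrappers.scikit_learn.KerasRegressor used in the code above and reuses the x_train/y_train arrays returned by prepare_dataset().

# Minimal sketch only (assumes the TF 2.x scikit-learn wrapper; newer TensorFlow
# releases provide the equivalent wrapper through the scikeras package).
from sklearn.model_selection import GridSearchCV
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.wrappers.scikit_learn import KerasRegressor


def build_model(num_features=31, num_targets=1) -> Sequential:
    # Build and compile only; no save()/load_model() here, so the wrapper can
    # rebuild the network freely for every cross-validation fold.
    model = Sequential()
    model.add(Dense(units=500, input_shape=(num_features,)))
    model.add(Activation("relu"))
    model.add(Dense(units=250))
    model.add(Activation("relu"))
    model.add(Dense(units=100))
    model.add(Activation("relu"))
    model.add(Dense(units=num_targets))
    model.add(Activation("tanh"))  # as in the question; note tanh bounds predictions to [-1, 1]
    model.compile(loss="mean_squared_error",
                  optimizer=Adam(learning_rate=0.0005),
                  metrics=["mae"])
    return model


# Pass the function itself, not build_model(): the wrapper calls it internally.
regressor = KerasRegressor(build_fn=build_model, verbose=0)

param_grid = {"batch_size": [10, 20, 40, 60, 80, 100],
              "epochs": [10, 50, 100]}
grid = GridSearchCV(estimator=regressor, param_grid=param_grid, cv=3)
grid_result = grid.fit(x_train, y_train)  # x_train/y_train from prepare_dataset()

print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))

The ram://.../variables path in the traceback suggests the failure happens while an already-built Keras model is being copied in memory, which this pattern avoids by handing only the builder function to the wrapper; whether that resolves the error also depends on the TensorFlow version in use.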

