TypeError: dropout(): argument 'input' (position 1) must be Tensor, not str - BERT model
Hi, I encounter this error when training my BERT model for sentiment analysis, where my classes have 3 outcomes and my input data is text.
So I get the above error when I train the model. I have searched some guides and tried setting this parameter on my BERT model: bert_model = BertModel.from_pretrained(MODEL_NAME, return_dict=False)
But I am still getting the same error as before. I am using the 'bert-base-cased' pretrained model.
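For reference, the error is raised inside a dropout call, which points at the classifier head rather than the training loop below. A minimal sketch of the kind of head that triggers it (the class name, dropout probability, and layer sizes are illustrative, not necessarily my exact code): with return_dict=True, the default in recent transformers versions, tuple-unpacking the BertModel output yields its string keys, so nn.Dropout receives the string 'pooler_output' instead of a tensor.

import torch.nn as nn
from transformers import BertModel

MODEL_NAME = 'bert-base-cased'

class SentimentClassifier(nn.Module):  # illustrative name
    def __init__(self, n_classes):
        super().__init__()
        # With return_dict=True (the default), self.bert(...) returns a
        # dict-like ModelOutput; tuple-unpacking it yields string keys.
        # return_dict=False restores the (sequence_output, pooled_output) tuple.
        self.bert = BertModel.from_pretrained(MODEL_NAME, return_dict=False)
        self.drop = nn.Dropout(p=0.3)
        self.out = nn.Linear(self.bert.config.hidden_size, n_classes)

    def forward(self, input_ids, attention_mask):
        # Without return_dict=False this unpacking binds pooled_output to the
        # string 'pooler_output', and dropout() then raises the TypeError above.
        _, pooled_output = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        return self.out(self.drop(pooled_output))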
# Imports assumed by this snippet
import numpy as np
import torch
from torch import nn
from collections import defaultdict

# Function for a single training iteration
def train_epoch(model, data_loader, loss_fn, optimizer, device, scheduler, n_examples):
    model = model.train()
    losses = []
    correct_predictions = 0
    for d in data_loader:
        input_ids = d["input_ids"].to(device)
        attention_mask = d["attention_mask"].to(device)
        targets = d["targets"].to(device)
        outputs = model(
            input_ids=input_ids,
            attention_mask=attention_mask
        )
        _, preds = torch.max(outputs, dim=1)
        loss = loss_fn(outputs, targets)
        correct_predictions += torch.sum(preds == targets)
        losses.append(loss.item())
        # Backward prop
        loss.backward()
        # Gradient descent with gradient clipping
        nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()
    return correct_predictions.double() / n_examples, np.mean(losses)
%%time
history = defaultdict(list)
best_accuracy = 0
for epoch in range(EPOCHS):
    # Show details
    print(f"Epoch {epoch + 1}/{EPOCHS}")
    print("-" * 10)
    train_acc, train_loss = train_epoch(
        model,
        train_data_loader,
        loss_fn,
        optimizer,
        device,
        scheduler,
        len(df_train)
    )
    print(f"Train loss {train_loss} accuracy {train_acc}")
    # Get model performance (accuracy and loss)
    val_acc, val_loss = eval_model(
        model,
        val_data_loader,
        loss_fn,
        device,
        len(df_val)
    )
    print(f"Val loss {val_loss} accuracy {val_acc}")
    print()
    history['train_acc'].append(train_acc)
    history['train_loss'].append(train_loss)
    history['val_acc'].append(val_acc)
    history['val_loss'].append(val_loss)
    # If we beat prev performance
    if val_acc > best_accuracy:
        torch.save(model.state_dict(), 'best_model_state.bin')
        best_accuracy = val_acc
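eval_model (called above) is essentially train_epoch without the backward pass; a sketch under that assumption:

# Sketch of eval_model, assuming it mirrors train_epoch minus backprop
def eval_model(model, data_loader, loss_fn, device, n_examples):
    model = model.eval()
    losses = []
    correct_predictions = 0
    with torch.no_grad():  # no gradients needed for validation
        for d in data_loader:
            input_ids = d["input_ids"].to(device)
            attention_mask = d["attention_mask"].to(device)
            targets = d["targets"].to(device)
            outputs = model(input_ids=input_ids, attention_mask=attention_mask)
            _, preds = torch.max(outputs, dim=1)
            losses.append(loss_fn(outputs, targets).item())
            correct_predictions += torch.sum(preds == targets)
    return correct_predictions.double() / n_examples, np.mean(losses)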