无法使用python从谷歌驱动器下载大文件
我想使用 python 从 google 驱动器下载大文件。 我使用下面的代码做到了这一点
import pickle
import os.path
import requests
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import os
import pickle
class DriveAPI:
global SCOPES
SCOPES = ['https://www.googleapis.com/auth/drive.file']
def __init__(self):
self.creds = None
if os.path.exists('token.pickle'):
with open('token.pickle','rb') as token:
self.creds = pickle.load(token)
if not self.creds or not self.creds.valid:
if self.creds and self.creds.expired and self.creds.refresh_token:
self.creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file('credentials.json', SCOPES)
self.creds = flow.run_local_server(port=0)
with open('token.pickle', 'wb') as token:
pickle.dump(self.creds,token)
self.service = build('drive','v3',credentials=self.creds)
results = self.service.files().list(pageSize=100,fields='files(id,name,createdTime)').execute()
items = results.get('files',[])
def download_file_from_google_drive(self,id, destination):
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(self,response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params = { 'id' : id }, stream = True)
token = get_confirm_token(response)
if token:
params = { 'id' : id, 'confirm' : token }
response = session.get(URL, params = params, stream = True)
save_response_content(response, destination)
if __name__ == "__main__":
obj = DriveAPI()
f_id = "File ID"
file_name = "File Name"
obj.service.permissions().create(body={"role":"reader", "type":"anyone"}, fileId=f_id).execute()
obj.FileDownload(f_id,file_name)
通过使用上面的代码,我能够在一段时间内(例如 2 个月)下载 2Gb 大小的文件。但现在我无法下载大文件。 如果我运行此代码,该文件仅下载 2.2kb 的文件。 但终端打印没有问题。
I want to download large size files from google drive using python.
And I did this using below code
import pickle
import os.path
import requests
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import os
import pickle
class DriveAPI:
global SCOPES
SCOPES = ['https://www.googleapis.com/auth/drive.file']
def __init__(self):
self.creds = None
if os.path.exists('token.pickle'):
with open('token.pickle','rb') as token:
self.creds = pickle.load(token)
if not self.creds or not self.creds.valid:
if self.creds and self.creds.expired and self.creds.refresh_token:
self.creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file('credentials.json', SCOPES)
self.creds = flow.run_local_server(port=0)
with open('token.pickle', 'wb') as token:
pickle.dump(self.creds,token)
self.service = build('drive','v3',credentials=self.creds)
results = self.service.files().list(pageSize=100,fields='files(id,name,createdTime)').execute()
items = results.get('files',[])
def download_file_from_google_drive(self,id, destination):
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(self,response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params = { 'id' : id }, stream = True)
token = get_confirm_token(response)
if token:
params = { 'id' : id, 'confirm' : token }
response = session.get(URL, params = params, stream = True)
save_response_content(response, destination)
if __name__ == "__main__":
obj = DriveAPI()
f_id = "File ID"
file_name = "File Name"
obj.service.permissions().create(body={"role":"reader", "type":"anyone"}, fileId=f_id).execute()
obj.FileDownload(f_id,file_name)
By using above code I was able to download 2Gb size file for a certain period of time like 2 months. But now I'm unable to download large size files.
If I run this code the file downloads only 2.2kb file present.
But there is no issues that prints in terminal.
如果你对这篇内容有疑问,欢迎到本站社区发帖提问 参与讨论,获取更多帮助,或者扫码二维码加入 Web 技术交流群。
绑定邮箱获取回复消息
由于您还没有绑定你的真实邮箱,如果其他用户或者作者回复了您的评论,将不能在第一时间通知您!
发布评论