hxz393 / BaiduPanFilesTransfers

A batch transfer, sharing, and link-checking tool for Baidu Netdisk
GNU General Public License v3.0

The only cookies that are actually needed are BDUSS and STOKEN. #4

Closed xJogger closed 2 years ago

xJogger commented 4 years ago

As the title says. Also, the only headers that matter are the UA and Referer, and bdstoken is not actually required, at least not for fetching the file list or transferring shared links.

Here is some code I put together based on your API calls:

import os
import re
import urllib

import requests
from fake_useragent import UserAgent

class BaiDuPan(object):

    def __init__(self,BDUSS,STOKEN):
        cookies = {
            'BDUSS'  : BDUSS,
            'STOKEN' : STOKEN
        }

        ua = UserAgent()
        headers = {
            'User-Agent': ua.firefox,
            'Referer': 'https://pan.baidu.com',
        }

        self.session = requests.Session()
        self.session.headers.update(headers)
        self.session.cookies =  requests.cookies.cookiejar_from_dict(cookies)

    # Verify the share link and resolve its file info
    def get_link_info(self,link_url, pass_code):
        # Verify the extraction code
        check_url = 'https://pan.baidu.com/share/verify?surl=' + link_url[25:48]
        post_data = {'pwd': pass_code, 'vcode': '', 'vcode_str': '', }
        response_post = self.session.post(url=check_url,data=post_data,timeout=10,allow_redirects=False)
        bdclnd = response_post.json()['randsk']
        self.session.cookies =  requests.utils.add_dict_to_cookiejar(self.session.cookies,{'BDCLND':bdclnd})
        # Fetch the file info from the share page
        response = self.session.get(url=link_url,timeout=15,allow_redirects=True).content.decode("utf-8")
        shareid   = re.findall('"shareid":(\\d+?),"', response)[0]
        user_id   = re.findall('"uk":(\\d+?),"', response)[0]
        fs_id     = re.findall('"fs_id":(\\d+?),"', response)[0]
        file_path = re.findall('"path":"(.+?)","', response)[0].replace('\\/','/').encode("utf-8").decode("unicode-escape")
        return [shareid,user_id,fs_id,file_path]

    # Get the list of files in a share
    def get_share_file_list(self,link_url, pass_code):
        shareid,user_id,fs_id,file_path = self.get_link_info(link_url, pass_code)
        share_name = os.path.split(file_path)[1]
        file_path = urllib.parse.quote(file_path).replace('/','%2F')
        url = 'https://pan.baidu.com/share/list?uk=%s&shareid=%s&order=other&desc=1&showempty=0&web=1&page=1&num=1000&dir=%s' % (user_id,shareid,file_path)
        response = self.session.get(url=url, timeout=15, allow_redirects=False)
        file_list =  response.json()['list']

        share_list = []
        for file in file_list:
            share_list = share_list + [ [file['server_filename'],str(file['fs_id'])] ]
        return [shareid,user_id,fs_id,share_name,share_list]

    # List a directory in our own pan
    def get_file_list(self,path):
        path = urllib.parse.quote(path).replace('/','%2F')
        url = 'https://pan.baidu.com/api/list?order=name&desc=1&showempty=0&web=1&page=1&num=1000&dir=' + path
        response = self.session.get(url=url,timeout=15, allow_redirects=False)
        dir_list_json = response.json()['list']
        file_list = []
        for file in dir_list_json:
            file_list.append(file['server_filename'])
        return file_list

    # Transfer (save) shared files into the given directory
    def transfer_file(self,shareid,user_id,fs_id,dir_name):
        url = 'https://pan.baidu.com/share/transfer?shareid=' + shareid + '&from=' + user_id
        post_data = {'fsidlist': '[' + fs_id + ']', 'path': dir_name, }
        errno = self.session.post(url=url,data=post_data, timeout=15, allow_redirects=False).json()['errno']
        return True if errno == 0 else False

    def smart_save(self,link_url,pass_code,root_path):
        current_root_list = self.get_file_list(root_path)
        shareid,user_id,fs_id,share_name,share_list = self.get_share_file_list(link_url, pass_code)
        # If the shared folder does not exist in the pan yet, transfer the whole folder at once
        print('资源:',share_name)
        if share_name not in current_root_list:
            if self.transfer_file(shareid,user_id,fs_id,root_path) :
                print('已转存:',share_name,',共%s个文件' % str(len(share_list)))
            else:
                print('转存失败:',share_name)
        else:
            # The shared folder already exists in the pan, so compare its contents
            current_path = root_path + '/' + share_name
            current_file_list = self.get_file_list(current_path)
            # Compare file by file and transfer whatever is missing
            for file in share_list:
                if file[0] not in current_file_list:
                    if self.transfer_file(shareid,user_id,file[1],current_path):
                        print('已转存:',share_name,file[0])
                    else:
                        print('转存失败:',share_name,file[0])

def main():
    STOKEN = ''
    BDUSS  = ''
    pan = BaiDuPan(BDUSS,STOKEN)

    link_url  = 'https://pan.baidu.com/s/xxxxxxxxxxxxxxxxxxxxxxx'
    pass_code = '1234'
    shareid,user_id,fs_id,share_name,share_list = pan.get_share_file_list(link_url, pass_code)
    print(shareid,user_id,fs_id,share_name,share_list)

    root_path = '/Test'
    file_list = pan.get_file_list(root_path)
    print(file_list)

    res = pan.transfer_file(shareid,user_id,fs_id,root_path)
    print(res)

    pan.smart_save(link_url,pass_code,root_path)

if __name__ == '__main__':
    main()
hxz393 commented 4 years ago

Nice. I didn't fetch bdstoken in the early versions either; it only became necessary later for creating folders. One thing you may not have noticed: there is a kind of link where the extraction code yields multiple shared files that are not inside a folder, and my tool only transfers the first one. I didn't pay attention at the time, and I still don't know how that kind of link is generated.

xJogger commented 4 years ago

> Nice. I didn't fetch bdstoken in the early versions either; it only became necessary later for creating folders. One thing you may not have noticed: there is a kind of link where the extraction code yields multiple shared files that are not inside a folder, and my tool only transfers the first one. I didn't pay attention at the time, and I still don't know how that kind of link is generated.

That kind of link is created by selecting multiple files and then sharing them. For example: https://pan.baidu.com/s/182A8FJ02gCq1MWYyrm_emA fm9k

I wrote a snippet for it:

import os
import re
import urllib
import json

import requests
from fake_useragent import UserAgent

class BaiDuPan(object):

    def __init__(self,BDUSS,STOKEN):
        cookies = {
            'BDUSS'  : BDUSS,
            'STOKEN' : STOKEN
        }

        ua = UserAgent()
        headers = {
            'User-Agent': ua.firefox,
            'Referer': 'https://pan.baidu.com',
        }

        self.session = requests.Session()
        self.session.headers.update(headers)
        self.session.cookies =  requests.cookies.cookiejar_from_dict(cookies)

    # Verify a multi-file share link and resolve its file list
    def get_multi_file_link_info(self,link_url, pass_code):
        # Verify the extraction code
        check_url = 'https://pan.baidu.com/share/verify?surl=' + link_url[25:48]
        post_data = {'pwd': pass_code, 'vcode': '', 'vcode_str': '', }
        response_post = self.session.post(url=check_url,data=post_data,timeout=10,allow_redirects=False)
        bdclnd = response_post.json()['randsk']
        self.session.cookies =  requests.utils.add_dict_to_cookiejar(self.session.cookies,{'BDCLND':bdclnd})
        # Fetch the file info (multi-file shares embed it in yunData.setData)
        response = self.session.get(url=link_url,timeout=15,allow_redirects=True).content.decode("utf-8")
        share_info_json   = json.loads(re.findall(r'yunData\.setData\((.*)\);', response)[0])
        shareid = share_info_json['shareid']
        user_id = share_info_json['uk']
        file_list = [ [i['fs_id'],i['server_filename']] for i in share_info_json['file_list']['list'] ]
        return [shareid,user_id,file_list]
hxz393 commented 4 years ago

Thanks, I'll work it into the next update. Still, it's probably better not to parse the cookies; Baidu changes the rules from time to time.

xJogger commented 4 years ago

> Thanks, I'll work it into the next update. Still, it's probably better not to parse the cookies; Baidu changes the rules from time to time.

Yeah, not parsing the Cookie does feel more robust.

One more thing: while working from your code, I found that if you copy the request header's Cookie from Firefox the program will not run; you have to copy it from Chrome. It turns out Firefox abbreviates some long cookie values with an ellipsis.
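
A quick sanity check on the pasted Cookie catches that early. Just a sketch; it assumes the truncation shows up as a literal '…' character in the copied value:

def check_copied_cookie(cookie):
    # Firefox may shorten long header values with an ellipsis when copying,
    # which makes the Cookie unusable; better to fail fast with a clear message.
    if '…' in cookie:
        raise ValueError('Cookie looks truncated (contains an ellipsis); copy the full value from Chrome')
    return cookie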

Also, after the extraction code has been verified, this line that updates the Cookie:

request_header['Cookie'] = re.sub(r'BDCLND=(\S+?);', r'BDCLND=' + bdclnd + ';', request_header['Cookie'])

changed to

if 'BDCLND' in request_header['Cookie'] :
    request_header['Cookie'] = re.sub(r'BDCLND=(\S+?);', r'BDCLND=' + bdclnd + ';', request_header['Cookie'])
else:
    request_header['Cookie'] = request_header['Cookie'] + ';BDCLND=%s' % bdclnd

works better, because the Cookie copied from the browser does not always contain BDCLND; a plain substitution may then fail to update the Cookie, which is exactly the problem I ran into.

hxz393 commented 4 years ago

Firefox really does do that... although right-clicking to copy gives the complete Cookie. Baidu has pushed an update: different UAs now receive completely different cookie values, so I will have to adjust the program.

ldsxp commented 4 years ago

A question: since we are all on 3.6+, why not use f-strings? Also, fake_useragent may fail to work because of network problems.

hxz393 commented 4 years ago

Force of habit. I had forgotten about f-strings until you mentioned them, haha. Still more to learn.

xJogger commented 4 years ago

> A question: since we are all on 3.6+, why not use f-strings? Also, fake_useragent may fail to work because of network problems.

So that's what f-strings are called. I had seen the syntax before but never noticed the leading f. As for fake_useragent failing because of network problems, is that because it fetches its UA list online?
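
For reference, it is just the f prefix in front of the quotes; these two lines print the same thing:

name, num = '秒传.rar', 3
print('已转存:%s,共%s个文件' % (name, num))  # old %-style formatting
print(f'已转存:{name},共{num}个文件')  # f-string, Python 3.6+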

ldsxp commented 4 years ago

> A question: since we are all on 3.6+, why not use f-strings? Also, fake_useragent may fail to work because of network problems.

> So that's what f-strings are called. I had seen the syntax before but never noticed the leading f. As for fake_useragent failing because of network problems, is that because it fetches its UA list online?

Yes, it's a network problem, so I usually use a local copy:

from fake_useragent import UserAgent

# Download fake-useragent's fake_useragent.json from: https://fake-useragent.herokuapp.com/browsers/0.1.11 (BASE_DIR here is the project's base directory)
ua = UserAgent(path=os.path.join(BASE_DIR, 'data', 'fake_useragent_0.1.11.json'))
ldsxp commented 4 years ago

@xJogger @hxz393 I put together another version on top of your work. Rapid upload (what is the 梦姬 format anyway?) may not work since I have no links to test with, but transfers work now. The transfer part could still be polished; I will keep using it as-is for now...

import os
import re
import json
import time
import base64
import random
from urllib.parse import quote

import urllib3
import requests
import requests.cookies

# Common request headers
HEADERS = {
    'Host': 'pan.baidu.com',
    'Connection': 'keep-alive',
    'Upgrade-Insecure-Requests': '1',
    'Sec-Fetch-Dest': 'document',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'Sec-Fetch-Site': 'same-site',
    'Sec-Fetch-Mode': 'navigate',
    'Referer': 'https://pan.baidu.com',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'zh-CN,zh;q=0.9',
}

def random_sleep(start=1, end=3, debug=True):
    """
    Random delay: if you hit too many pages in a row, your IP may get banned.
    """
    sleep_time = random.randint(start, end)
    if debug:
        print('随机延迟:%s 秒......' % sleep_time)
    time.sleep(sleep_time)

def parse_url_and_code(url):
    """解析网盘的链接和提取码"""
    url = url.lstrip('链接:').strip()
    link_url, pass_code = re.sub(r'提取码*[::](.*)', r'\1', url).split(' ', maxsplit=1)
    pass_code = pass_code.strip()[:4]
    return link_url, pass_code

def parse_rapid(url_code):
    """解析秒传链接"""
    # Handle the 梦姬 format (4FFB5BC751CC3B7A354436F85FF865EE#797B1FFF9526F8B5759663EC0460F40E#21247774#秒传.rar)
    if url_code.count('#') > 2:
        md5, slice_md5, length, name = url_code.split('#', maxsplit=3)
    # Handle the 游侠 v1 format (bdlink=)
    elif bool(re.search('bdlink=', url_code, re.IGNORECASE)):
        md5, slice_md5, length, name = base64.b64decode(re.findall(r'bdlink=(.+)', url_code)[0]).decode(
            "utf-8").strip().split('#', maxsplit=3)
    # Handle the PanDL format (bdpan://)
    elif bool(re.search('bdpan://', url_code, re.IGNORECASE)):
        bdpan_data = base64.b64decode(re.findall(r'bdpan://(.+)', url_code)[0]).decode(
            "utf-8").strip().split('|')
        md5, slice_md5, length, name = bdpan_data[2], bdpan_data[3], bdpan_data[1], bdpan_data[0]
    # Handle the BaiduPCS-Go format
    elif bool(re.search('BaiduPCS-Go', url_code, re.IGNORECASE)):
        md5 = re.findall(r'-md5=(\S+)', url_code)[0]
        slice_md5 = re.findall(r'-slicemd5=(\S+)', url_code)[0]
        length = re.findall(r'-length=(\S+)', url_code)[0]
        name = re.findall(r'-crc32=\d+\s(.+)', url_code)[0].replace('"', '').replace('/', '\\').strip()
    else:
        return None

    return {'md5': md5, 'slice_md5': slice_md5, 'length': length, 'name': name}

def check_link_type(link_list_line):
    """检测链接种类"""
    if link_list_line.find('https://pan.baidu.com/s/') >= 0:
        link_type = '/s/'
    elif bool(re.search('(bdlink=|bdpan://|BaiduPCS-Go)', link_list_line, re.IGNORECASE)):
        link_type = 'rapid'
    elif link_list_line.count('#') > 2:
        link_type = 'rapid'
    else:
        link_type = 'unknown'
    return link_type

class BaiDuPan:

    def __init__(self, headers=None):

        self.bdstoken = None

        if headers is None:
            self.headers = dict(HEADERS)
        else:
            self.headers = headers

        # Default timeout, see https://2.python-requests.org//zh_CN/latest/user/advanced.html#timeout
        self.timeout = 10

        self.session = requests.Session()

        # Disable SSL verification
        self.session.verify = False
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

    def load_cookie_login(self, cookie, user_agent=None, ):
        """
        Load a cookie to log in to the account

        After loading the login info, if self.bdstoken could not be obtained, the login failed
        """

        if user_agent is None:
            self.headers[
                'User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
        else:
            self.headers['User-Agent'] = user_agent

        self.headers['Cookie'] = cookie
        self.session.headers.update(self.headers)
        # self.session.cookies = requests.cookies.cookiejar_from_dict(cookies)  # not used; the raw Cookie header is sent instead
        self.get_bdstoken()
        if self.bdstoken is None:
            raise ValueError('获取 bdstoken 失败!')

    def get_bdstoken(self):
        """获取 bdstoken 函数"""
        url = 'https://pan.baidu.com/disk/home'
        response = self.session.get(url=url, timeout=self.timeout, allow_redirects=True)
        bdstoken_list = re.findall("'bdstoken',\\s'(\\S+?)'", response.text)
        if bdstoken_list:
            self.bdstoken = bdstoken_list[0]
        else:
            self.bdstoken = None
        return self.bdstoken

    def verify_link(self, link_url, pass_code):
        """验证链接"""

        url = f'https://pan.baidu.com/share/verify?surl={link_url[25:48]}'  # + f'&bdstoken={self.bdstoken}'
        post_data = {'pwd': pass_code, 'vcode': '', 'vcode_str': '', }
        response = self.session.post(url=url, data=post_data, timeout=self.timeout)
        data = response.json()
        # print(data)
        if data['errno'] == 0:
            bdclnd = data['randsk']
            cookie = self.session.headers['Cookie']
            if 'BDCLND=' in cookie:
                cookie = re.sub(r'BDCLND=(\S+?);', f'BDCLND={bdclnd};', cookie)
            else:
                cookie += f';BDCLND={bdclnd}'
            self.session.headers['Cookie'] = cookie
            # print(self.session.headers['Cookie'])
            return data
        elif data['errno'] == -12:
            raise ValueError('提取码错误')
        elif data['errno'] == -62:
            raise ValueError('错误尝试次数过多,请稍后再试!')
        else:
            raise ValueError(data)

    def get_link_info(self, link_url, pass_code):
        """
        Fetch the file info

        This can fetch the list of files to save, but only the first path is returned
        """
        # Verify the extraction code
        self.verify_link(link_url, pass_code)
        random_sleep(start=1, end=3, debug=True)

        # Fetch the file info
        response = self.session.get(url=link_url, timeout=self.timeout)
        text = response.text
        shareid_list = re.findall('"shareid":(\\d+?),"', text)
        user_id_list = re.findall('"uk":(\\d+?),"', text)
        fs_id_list = re.findall('"fs_id":(\\d+?),"', text)
        path = re.findall('"path":"(.+?)","', text)[0].replace('\\/', '/').encode("utf-8").decode(
            "unicode-escape")
        if not shareid_list:
            raise ValueError('没获取到 shareid')
        elif not user_id_list:
            raise ValueError('没获取到 user_id')
        elif not fs_id_list:
            raise ValueError('文件已经被删除')
        else:
            return {'shareid': shareid_list[0], 'user_id': user_id_list[0], 'fs_id_list': fs_id_list, 'path': path}

    def get_share_link_info(self, link_url, pass_code):
        """获取共享链接的信息"""
        # Verify the extraction code
        self.verify_link(link_url, pass_code)
        random_sleep(start=1, end=3, debug=True)

        # Fetch the file info
        response = self.session.get(url=link_url, timeout=self.timeout)
        link_info = json.loads(re.findall(r'yunData\.setData\((.*)\);', response.text)[0])
        # print(link_info)
        return link_info

    def get_link_data(self, link_url, pass_code):
        """获取链接数据"""
        link_info = self.get_share_link_info(link_url, pass_code)
        # print(link_info)
        shareid = link_info['shareid']
        user_id = link_info['uk']
        file_list = [{'fs_id': i['fs_id'], 'filename': i['server_filename'], 'isdir': i['isdir']} for i in
                     link_info['file_list']['list']]
        return {'shareid': shareid, 'user_id': user_id, 'file_list': file_list}

    def get_share_file_list(self, user_id, shareid, file_path):
        """获取分享文件列表"""

        file_path = quote(file_path).replace('/', '%2F')
        url = f'https://pan.baidu.com/share/list?uk={user_id}&shareid={shareid}&order=other&desc=1&showempty=0&web=1&page=1&num=1000&dir={file_path}'
        response = self.session.get(url=url, timeout=self.timeout)
        data = response.json()
        print(data)
        if data['errno'] == 0:
            return data['list']
        else:
            raise ValueError(data)

    def get_file_list(self, path=None, order='name', page=1, num=1000):
        """
        List a directory

        :param path: a full path is required, e.g. path='/共享'
        :param order: sort order: asc ascending, -desc descending, time by time, name by file name, size by size
        :return:
        """
        if not path:
            path = '/'
        path = quote(path).replace('/', '%2F')
        url = f'https://pan.baidu.com/api/list?order={order}&desc=1&showempty=0&web=1&page={page}&num={num}&dir={path}'
        # I have not checked whether this endpoint needs bdstoken; if it does, append &bdstoken={self.bdstoken}
        response = self.session.get(url=url, timeout=self.timeout)  # , allow_redirects=False
        data = response.json()
        if data['errno'] == 0:
            return data['list']
        else:
            raise ValueError(data)

    def get_filename_list(self, path=None, order='name'):
        """
        Get the file names in a directory

        Note: a full path is required, e.g. path='/共享'
        """
        file_list = self.get_file_list(path, order)
        if file_list:
            return [data['server_filename'] for data in file_list]
        else:
            return []

    def create_dir(self, dir_name):
        """Create a directory"""
        url = f'https://pan.baidu.com/api/create?a=commit&bdstoken={self.bdstoken}'
        post_data = {'path': dir_name, 'isdir': '1', 'block_list': '[]', }
        response = self.session.post(url=url, data=post_data, timeout=self.timeout)
        data = response.json()
        if data['errno'] == 0:
            return data
        else:
            raise ValueError(data)

    def transfer_files(self, shareid, user_id, fs_id_list, dir_name='/'):
        """
        Transfer (save) shared files
        :param shareid: share ID
        :param user_id: ID of the user who shared the files
        :param fs_id_list: list of file IDs
        :param dir_name: destination path
        :return:
        """
        url = f'https://pan.baidu.com/share/transfer?shareid={shareid}&from={user_id}'  # + f'&bdstoken={self.bdstoken}'
        if not dir_name.strip().startswith('/'):
            dir_name = '/' + dir_name.strip()
        fsidlist = f"[{','.join(i for i in fs_id_list)}]"
        post_data = {'fsidlist': fsidlist, 'path': dir_name, }
        response = self.session.post(url=url, data=post_data, timeout=self.timeout)
        data = response.json()
        if data['errno'] == 0:
            return data
        elif data['errno'] == 12:
            raise ValueError('转存失败:目录中已有同名文件存在')
        else:
            raise ValueError(data)

    def transfer_files_rapid(self, rapid_data, dir_name='/'):
        """转存秒传链接"""
        url = f'https://pan.baidu.com/api/rapidupload?bdstoken={self.bdstoken}'
        if not dir_name.strip().startswith('/'):
            dir_name = '/' + dir_name.strip()

        post_data = {'path': f"{dir_name}/{rapid_data['name']}", 'content-md5': rapid_data['md5'],
                     'slice-md5': rapid_data['slice_md5'], 'content-length': rapid_data['length']}
        response = self.session.post(url=url, data=post_data, timeout=self.timeout)
        data = response.json()
        if data['errno'] == 404:
            post_data['content-md5'] = post_data['content-md5'].lower()
            post_data['slice-md5'] = post_data['slice-md5'].lower()
            response = self.session.post(url=url, data=post_data, timeout=self.timeout)
            data = response.json()

        if data['errno'] == 0:
            return data
        elif data['errno'] == -8:
            raise ValueError('转存失败:目录中已有同名文件存在')
        elif data['errno'] == 404:
            raise ValueError('转存失败:链接无效')
        elif data['errno'] == 2:
            raise ValueError('转存失败:非法路径')
        elif data['errno'] == -7:
            raise ValueError('转存失败:非法文件名')
        elif data['errno'] == -10:
            raise ValueError('转存失败:容量不足')
        elif data['errno'] == 114514:
            raise ValueError('转存失败:接口调用失败')
        else:
            raise ValueError(data)

    def smart_save(self, link_url, pass_code, root_path):
        """
        Transfer files

        Warning: do not use this; it has not been migrated to the new methods yet
        """
        current_root_list = self.get_filename_list(root_path)
        shareid, user_id, fs_id, share_name, share_list = self.get_share_file_list(link_url, pass_code)
        # If the shared folder does not exist in the pan yet, transfer the whole folder at once
        print('资源:', share_name)
        if share_name not in current_root_list:
            if self.transfer_file(shareid, user_id, fs_id, root_path):
                print('已转存:', share_name, ',共%s个文件' % str(len(share_list)))
            else:
                print('转存失败:', share_name)
        else:
            # The shared folder already exists in the pan, so compare its contents
            current_path = root_path + '/' + share_name
            current_file_list = self.get_file_list(current_path)
            # Compare file by file and transfer whatever is missing
            for file in share_list:
                if file[0] not in current_file_list:
                    if self.transfer_file(shareid, user_id, file[1], current_path):
                        print('已转存:', share_name, file[0])
                    else:
                        print('转存失败:', share_name, file[0])

def transfer_example(pan, url_code, dir_name='/'):
    """转存文件示例"""

    link_url, pass_code = parse_url_and_code(url_code)
    print(link_url, pass_code)

    link_data = pan.get_link_data(link_url, pass_code)
    shareid, user_id = link_data['shareid'], link_data['user_id']
    print('转存的文件列表', [str(data['filename']) for data in link_data['file_list']])
    fs_id_list = [str(data['fs_id']) for data in link_data['file_list']]

    # Another way to get the transfer info; fall back to this if the example above stops working
    # link_info = pan.get_link_info(link_url, pass_code)
    # print('get_link_info', link_info)
    # shareid, user_id, fs_id_list = link_info['shareid'], link_info['user_id'], link_info['fs_id_list']

    # Transfer the files
    random_sleep(start=1, end=3, debug=True)
    print('transfer_files', pan.transfer_files(shareid, user_id, fs_id_list, dir_name))

if __name__ == '__main__':
    cookie = 'BDUSS=xxxx; STOKEN=xxxx'  # paste the full Cookie header copied from the browser here
    pan = BaiDuPan()
    pan.load_cookie_login(cookie)  # user_agent=None falls back to the built-in Chrome UA
    transfer_example(pan, '链接: https://pan.baidu.com/s/xxxxxxxxxxxxxxxxxxxxxxx 提取码: xxxx', '/')
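    # Hypothetical usage of the rapid-upload path above; untested, as noted in the
    # comment, and the link below is just the 梦姬-format example from parse_rapid().
    rapid_data = parse_rapid('4FFB5BC751CC3B7A354436F85FF865EE#797B1FFF9526F8B5759663EC0460F40E#21247774#秒传.rar')
    if rapid_data:
        print('transfer_files_rapid', pan.transfer_files_rapid(rapid_data, dir_name='/'))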