Closed ssvaccine closed 2 years ago
I use this function to get the book ID and the answer:
def numbook(qu_url, chatid, msgid, userid):
    """Scrape a Chegg question page for the book's ISBN-13 and the problem id,
    then hand both to ``ansboook()`` to fetch and deliver the answer.

    Parameters
    ----------
    qu_url : str
        URL of the Chegg textbook-solutions question page.
    chatid, msgid, userid :
        Opaque messenger identifiers, passed straight through to ``ansboook()``.

    Returns
    -------
    None. The result is delivered via the ``ansboook()`` side effect.

    NOTE(review): the extraction below takes the FIRST ``problemId`` occurrence
    in the page, so a link to e.g. chapter 3 problem 15 still resolves problem 1
    (the bug reported in this thread). Fixing that requires matching the problem
    number from the URL against the page's full problem list — TODO.
    """
    import re

    headers = {
        "User-Agent": "PostmanRuntime/7.28.0",
        "Accept": "/",
        "Cache-Control": "no-cache",
        "Postman-Token": "04d2a2c9-63ab-43bf-bfcb-e7f38a92beb4",
        "Host": "www.chegg.com",
        "Accept-Encoding": "gzip, deflate, br",
        "Connection": "keep-alive",
        "Cookie": "C=0;CSessionID=6f7a55cd-cf95-485c-ba1c-e3b122cd1d7b;O=0; PHPSESSID=837161d45166559ee95dfa1bdde1e371; U=0; V=ba35049ebce8516446423128043519ce6074891676d8c5.51631027; exp=A184A%7CA311C%7CA803B%7CC024A%7CA560B; expkey=BEE682351558F2E82EA91564D646A8A1; user_geo_location=%7B%22country_iso_code%22%3A%22US%22%2C%22country_name%22%3A%22United+States%22%2C%22region%22%3A%22VA%22%2C%22region_full%22%3A%22Virginia%22%2C%22city_name%22%3A%22Ashburn%22%2C%22postal_code%22%3A%2220149%22%2C%22locale%22%3A%7B%22localeCode%22%3A%5B%22en-US%22%5D%7D%7D",
    }
    url = str(qu_url)
    digits = re.compile(r"\d+")  # hoisted: compiled once, reused for every id

    # Bounded retries instead of the original ``while True`` — a page that
    # never renders "isbn13" must not spin the bot forever.
    for _attempt in range(10):
        try:
            resp = requests.get(url, headers=headers)
            soup = s(resp.content, "html.parser")

            scripts_text = str(soup.find_all("script"))
            isbn_pos = scripts_text.find("isbn13")
            if isbn_pos == -1:
                continue  # metadata not present in this response; retry

            page_text = str(soup)
            # Slice the metadata region between the "chapterData" and
            # "chapterMap" markers (the original's "+ 1" offset is preserved).
            start = page_text.find("chapterData") + 1
            end = page_text.find("chapterMap")
            chapter_blob = page_text[start:end].strip()

            # Everything after the FIRST "problemId" marker in that region —
            # this is the root cause of the "always problem 1" bug (see
            # docstring). Kept behavior-identical pending a real fix.
            problem_blob = chapter_blob[chapter_blob.find("problemId") + 1:].strip()

            # ISBN-13 is the second number in the 34-char window after the
            # "isbn13" marker; the problem id is the first number in its blob.
            isbn13 = digits.findall(scripts_text[isbn_pos:isbn_pos + 34])[1]
            problem_id = digits.findall(problem_blob)[0]

            ansboook(isbn13, problem_id, chatid, msgid, userid)
            return
        except Exception:
            # Best-effort, as in the original: swallow the error and retry
            # rather than crashing the bot on a transient network/parse failure.
            continue
For example, when the link points to Chapter 3, question 15, the bot always returns Chapter 3, question 1. I found out that the reason is that the bot picks up the first question ID on every page. I tried to solve it by re-finding the correct question ID, but unfortunately the site denied me access to the answer page — it returns "Unauthorized". Thank you!
👍
For example, the link is Chapter 3, question 15. The bot always return Chapter 3, question 1. I found out that the reason is bot found the 1st question id in every link. I tried to solve it by re-find the correct question id, but unfortunately the page denied me to access the answer page. It returns unauthorized. Thank you!
👍
Thank you for your help! I also found the ID and sent a request for it, but it returns "Unauthorized". How did you solve that? Thanks!
For example, the link is Chapter 3, question 15. The bot always return Chapter 3, question 1. I found out that the reason is bot found the 1st question id in every link. I tried to solve it by re-find the correct question id, but unfortunately the page denied me to access the answer page. It returns unauthorized. Thank you!