Hey Guys I am trying to (remove) unlist number from Truecaller using following link
https://www.truecaller.com/unlisting
I want to automate this process but because of recaptcha of google the requests are limited and cant possible so it it any way to do this using any library like unofficial libraries of Truecaller like Trunofficial .
https://www.truecaller.com/unlisting
Unlisting your phone number requires reCaptcha, you can bypass it.
This my be helpful
import requests
from requests.structures import CaseInsensitiveDict
url = "https://asia-south1-truecaller-web.cloudfunctions.net/api/noneu/unlist/v1"
headers = CaseInsensitiveDict()
headers["Authorization"] = "Bearer null"
headers["Content-Type"] = "application/json"
headers["Content-Length"] = "612"
headers["accept"] = "*/*"
headers["sec-gpc"] = "1"
headers["origin"] = "https://www.truecaller.com"
headers["sec-fetch-site"] = "cross-site"
headers["sec-fetch-mode"] = "cors"
headers["sec-fetch-dest"] = "empty"
headers["referer"] = "https://www.truecaller.com/"
headers["accept-encoding"] = "gzip, deflate, br"
headers["accept-language"] = "en-IN,en-GB;q=0.9,en-US;q=0.8,en;q=0.7"
data = {
"phoneNumber": "+919912345678",
"recaptcha": "03ANYolqtbEiFqaQ8wBrDF3kKqkCzIaH4r79oA2hCNd80gZGENvff9fPKocccytf6QXpPvQfQ12WMvgfdP1IKggff6lTY_0ucZxFB7r6A_dbNjfp_NSYtrkU4NX1h_LBQgnCO0ALkWS8CMjaIEjhxclfeClFv4EmFNEQis1OvrSVgvB8nJipuUxGakpa0eB8yWrEQCUfy0Gs7VA2hO4VaeLRTwr6BaxYsJsCP_3-vaMP2crZDDrIm8on_0H0vqh-1S44y69b0rSM6_ornuVZxeNQkpe_3NvPjQQxqQtdyQl
d55OQkK67PH7OH_A7s3GVgMa0VCOuX_UdBsPkd8mKf708GgutggfggvVrbe3DrBsUnpXMYchsv_revkhknej0G_SxAtqtwQoGPtt5iKSKHRmlelDJpYuQs6Lwi-4Umn_E
clRPT2iaohxZ3r8O_4jaGP9yhRiMyVkgTm6mutJn50nPFbyabjSqgC2ShlMEI7IoOqWp9g90b2bl4qw4h6k9vP4AVy36sCx2z_gksBEgxT1zsM3P77PQ_guo12k7rtFlUAmvdqhqgwowaKQFMMBfjWDo40"
}
resp = requests.post(url, headers=headers, data=data)
print(resp.json())
# {
# "lastUpdated": "2022-07-11T03:27:30.306713Z",
# "phoneNumber": "919912345678",
# "status": "unlisted"
# }
Related
Using Python, how do I make a request to the shopee API to get a list of products on offer with my affiliate link?
I've made several scripts, but they all have a signature issue or unsupported authentication attempt. Does anyone have a working example of how to do this?
Below are two code examples I made, but they don't work.
Query: productOfferV2 and shopeeOfferV2
code1:
import requests
import time
import hashlib
appID = '18341090114'
secret = 'XMAEHHWQD3OEGQX5P33AFRREJEDSQX76'
# Set the API endpoint URL
url = "https://open-api.affiliate.shopee.com.my/graphql"
payload = """
{
"query": "query Fetch($page:2){
productOfferV2(
listType: 0,
sortType: 2,
page: $page,
limit: 50
) {
nodes {
commissionRate
commission
price
productLink
offerLink
}
}
}",
"operationName": null,
"variables":{
"page":0
}
}
"""
payload = payload.replace('\n', '').replace(':0', f':{2}')
timestamp = int(time.time())
factor = appID+str(timestamp)+payload+secret
signature = hashlib.sha256(factor.encode()).hexdigest()
# Set the request headers
headers = {
'Content-type': 'application/json',
'Authorization': f'SHA256 Credential={appID},Timestamp={timestamp},Signature={factor}'
}
# Send the POST request
response = requests.post(url, payload, headers=headers)
data = response.json()
print(data)
return = error-invalid-signature-python
code2:
import requests
import json
import hmac
import hashlib
appID = '18341090114'
secret = 'XMAEHHWQD3OEGQX5P33AFRREJEDSQX76'
query = '''query { productOfferV2(item_id: ALL) {
offers {
shop_id
item_price
discount_price
offer_id
shop_location
shop_name
}
}
}'''
def generate_signature(query, secret):
signature = hmac.new(secret.encode(), query.encode(), hashlib.sha256).hexdigest()
return signature
signature = generate_signature(query, secret)
url = 'https://open-api.affiliate.shopee.com.my/graphql'
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': 'Bearer ' + appID + ':' + signature
}
payload = {
'query': query
}
response = requests.post(url, data=json.dumps(payload), headers=headers)
data = response.json()
print(data)
I am sending the post request to the TAP PAYMENT GATEWAY in order to save the card, the url is expecting two parameters like one is the source (the recently generated token) and inside the url the {customer_id}, I am trying the string concatenation, but it is showing the error like Invalid JSON request.
views.py:
ifCustomerExits = CustomerIds.objects.filter(email=email)
totalData = ifCustomerExits.count()
if totalData > 1:
for data in ifCustomerExits:
customerId = data.customer_id
print("CUSTOMER_ID CREATED ONE:", customerId)
tokenId = request.session.get('generatedTokenId')
payload = {
"source": tokenId
}
headers = {
'authorization': "Bearer sk_test_**********************",
'content-type': "application/json"
}
# HERE DOWN IS THE url of TAP COMPANY'S API:
url = "https://api.tap.company/v2/card/%7B"+customerId+"%7D"
response = requests.request("POST", url, data=payload, headers=headers)
json_data3 = json.loads(response.text)
card_id = json_data3["id"]
return sponsorParticularPerson(request, sponsorProjectId)
Their expected url = https://api.tap.company/v2/card/{customer_id}
Their documentation link: https://tappayments.api-docs.io/2.0/cards/create-a-card
Try this..
First convert dict. into JSON and send post request with request.post:
import json
...
customerId = str(data.customer_id)
print("CUSTOMER_ID CREATED ONE:", customerId)
tokenId = request.session.get('generatedTokenId')
payload = {
'source': tokenId
}
headers = {
'authorization': "Bearer sk_test_**************************",
'content-type': "application/json"
}
pd = json.dumps(payload)
# HERE DOWN IS THE url of TAP COMPANY'S API:
url = "https://api.tap.company/v2/card/%7B"+customerId+"%7D"
response = requests.post(url, data=pd, headers=headers)
json_data3 = json.loads(response.text)
card_id = json_data3["id"]
return sponsorParticularPerson(request, card_id)
Please tell me this works or not...
I am currently trying to update a Sharepoint 2013 list.
This is the module that I am using using to accomplish that task. However, when I run the post task I receive the following error:
"b'{"error":{"code":"-1, Microsoft.SharePoint.Client.InvalidClientQueryException","message":{"lang":"en-US","value":"Invalid JSON. A token was not recognized in the JSON content."}}}'"
Any idea of what I am doing wrong?
def update_item(sharepoint_user, sharepoint_password, ad_domain, site_url, sharepoint_listname):
login_user = ad_domain + '\\' + sharepoint_user
auth = HttpNtlmAuth(login_user, sharepoint_password)
sharepoint_url = site_url + '/_api/web/'
sharepoint_contextinfo_url = site_url + '/_api/contextinfo'
headers = {
'accept': 'application/json;odata=verbose',
'content-type': 'application/json;odata=verbose',
'odata': 'verbose',
'X-RequestForceAuthentication': 'true'
}
r = requests.post(sharepoint_contextinfo_url, auth=auth, headers=headers, verify=False)
form_digest_value = r.json()['d']['GetContextWebInformation']['FormDigestValue']
item_id = 4991 # This id is one of the Ids returned by the code above
api_page = sharepoint_url + "lists/GetByTitle('%s')/items(%d)" % (sharepoint_listname, item_id)
update_headers = {
"Accept": "application/json; odata=verbose",
"Content-Type": "application/json; odata=verbose",
"odata": "verbose",
"X-RequestForceAuthentication": "true",
"X-RequestDigest": form_digest_value,
"IF-MATCH": "*",
"X-HTTP-Method": "MERGE"
}
r = requests.post(api_page, {'__metadata': {'type': 'SP.Data.TestListItem'}, 'Title': 'TestUpdated'}, auth=auth, headers=update_headers, verify=False)
if r.status_code == 204:
print(str('Updated'))
else:
print(str(r.status_code))
I used your code for my scenario and fixed the problem.
I also faced the same problem. I think the way that data passed for update is not correct
Pass like below:
json_data = {
"__metadata": { "type": "SP.Data.TasksListItem" },
"Title": "updated title from Python"
}
and pass json_data to requests like below:
r= requests.post(api_page, json.dumps(json_data), auth=auth, headers=update_headers, verify=False).text
Note: I used SP.Data.TasksListItem as it is my type. Use http://SharePointurl/_api/web/lists/getbytitle('name')/ListItemEntityTypeFullName to find the type
I have the following Python code that makes a POST request to Clarifai's demographics endpoint:
import requests
import pprint
headers = {
"Authorization": "Key MY_KEY",
"Content-Type": "application/json"
}
data = {"inputs": [{"data": {"image": {"url": "https://samples.clarifai.com/demographics.jpg"}}}]}
proxies = {
"http": "MY_HTTP_PROXY",
"https": "MY_HTTPS_PROXY"
}
response = requests.post('https://api.clarifai.com/v2/models/c0c0ac362b03416da06ab3fa36fb58e3/outputs', headers=headers, data=data, proxies=proxies, verify=False)
pprint.pprint(response.json())
Note that I've replaced my real api key and proxies with MY_KEY, MY_HTTP_PROXY, and MY_HTTPS_PROXY respectively.
Does anyone experienced with Clarifai know what I'm doing wrong? I saw an example of working code posted on Clarifai's own forum, but I can't see any major differences between the working code and mine.
Just convert the data passed to json.
import requests
import pprint
import json
headers = {
"Authorization": "Key MY_KEY",
"Content-Type": "application/json"
}
data = {"inputs": [{"data": {"image": {"url": "https://samples.clarifai.com/demographics.jpg"}}}]}
json_data = json.dumps(data)
proxies = {
"http": "MY_HTTP_PROXY",
"https": "MY_HTTPS_PROXY"
}
response = requests.post('https://api.clarifai.com/v2/models/c0c0ac362b03416da06ab3fa36fb58e3/outputs', headers=headers, data=json_data, proxies=proxies, verify=False)
pprint.pprint(response.json())
Needed quotes around the data variable
'data = {"inputs": [{"data": {"image": {"url": "https://samples.clarifai.com/demographics.jpg"}}}]}'
I am trying to get all results from https://www.ncl.com/. I found that the request must be GET and sent to this link:https://www.ncl.com/search_vacations
so far i got the first 12 results and there is no problem parsing them. The problem is i cannot find a way to "change" the page of the results. I get 12 of 499 and i need to get them all. I've tried to do this https://www.ncl.com/search_vacations?current_page=1 and increment it every time but i get the same (first) result every time. Tried adding json body to the request json = {"current_page": '1'} again with no success.
This is my code so far:
import math
import requests
session = requests.session()
proxies = {'https': 'https://97.77.104.22:3128'}
headers = {
"authority": "www.ncl.com",
"method": "GET",
"path": "/search_vacations",
"scheme": "https",
"accept": "application/json, text/plain, */*",
"connection": "keep-alive",
"referer": "https://www.ncl.com",
"cookie": "AkaUTrackingID=5D33489F106C004C18DFF0A6C79B44FD; AkaSTrackingID=F942E1903C8B5868628CF829225B6C0F; UrCapture=1d20f804-718a-e8ee-b1d8-d4f01150843f; BIGipServerpreprod2_www2.ncl.com_http=61515968.20480.0000; _gat_tealium_0=1; BIGipServerpreprod2_www.ncl.com_r4=1957341376.10275.0000; MP_COUNTRY=us; MP_LANG=en; mp__utma=35125182.281213660.1481488771.1481488771.1481488771.1; mp__utmc=35125182; mp__utmz=35125182.1481488771.1.1.utmccn=(direct)|utmcsr=(direct)|utmcmd=(none); utag_main=_st:1481490575797$ses_id:1481489633989%3Bexp-session; s_pers=%20s_fid%3D37513E254394AD66-1292924EC7FC34CB%7C1544560775848%3B%20s_nr%3D1481488775855-New%7C1484080775855%3B; s_sess=%20s_cc%3Dtrue%3B%20c%3DundefinedDirect%2520LoadDirect%2520Load%3B%20s_sq%3D%3B; _ga=GA1.2.969979116.1481488770; mp__utmb=35125182; NCL_LOCALE=en-US; SESS93afff5e686ba2a15ce72484c3a65b42=5ecffd6d110c231744267ee50e4eeb79; ak_location=US,NY,NEWYORK,501; Ncl_region=NY; optimizelyEndUserId=oeu1481488768465r0.23231006365903206",
"Proxy-Authorization": "Basic QFRLLTVmZjIwN2YzLTlmOGUtNDk0MS05MjY2LTkxMjdiMTZlZTI5ZDpAVEstNWZmMjA3ZjMtOWY4ZS00OTQxLTkyNjYtOTEyN2IxNmVlMjlk"
}
def get_count():
response = requests.get(
"https://www.ncl.com/search_vacations?cruise=1&cruiseTour=0&cruiseHotel=0&cruiseHotelAir=0&flyCruise=0&numberOfGuests=4294953449&state=undefined&pageSize=10¤tPage=",
proxies=proxies)
tmpcruise_results = response.json()
tmpline = tmpcruise_results['meta']
total_record_count = tmpline['aggregate_record_count']
return total_record_count
total_cruise_count = get_count()
total_page_count = math.ceil(int(total_cruise_count) / 10)
session.headers.update(headers)
cruises = []
page_counter = 1
while page_counter <= total_page_count:
url = "https://www.ncl.com/search_vacations?current_page=" + str(page_counter) + ""
page = requests.get(url, headers=headers, proxies=proxies)
cruise_results = page.json()
for line in cruise_results['results']:
cruises.append(line)
print(line)
page_counter += 1
print(cruise_results['pagination']["current_page"])
print("----------")
print(len(cruises))
Using requests and a proxy. Any ideas how to do that?
The website claims to have 12264 search results (for a blank search), organised in pages of 12.
The search url takes a parameter Nao which seems to define the search result offset from which your result page will start.
So fetching https://www.ncl.com/uk/en/search_vacations?Nao=45
should get a "page" of 12 search results, starting with result 46.
and sure enough:
"pagination": {
"starting_record": "46",
"ending_record": "57",
"current_page": "4",
"start_page": "1",
...
So to page through all results, start with Nao = 0 and add 12 for each fetch.