Pythonic way to pass input variables to f-strings in another file

I have a project for my company where I need to go to an internal website that downloads an Excel file with factory data. I have two files: link_file.py, which stores the links to the websites, and main.py. The script compares two factories to each other, and I need to input different factory codes for each run. What is the most Pythonic way to get variables from main.py into the dynamic HTTP addresses?
Currently I have f-strings in main.py, but it looks cluttered with all of the links:
# main.py
import webbrowser

# The codes must be defined before the f-strings that use them.
factory_code1 = "abc"
factory_code2 = "xyz"

link1 = f"www.factorycode.com/{factory_code1}"
link2 = f"www.factorycode.com/{factory_code2}"

webbrowser.open(link1)
webbrowser.open(link2)
I tried a solution using .format() with the links in a separate file, but it still looks cluttered:
# link_file.py
# Plain template strings (not f-strings), so they can be filled in later:
link1 = "www.factorycode.com/{factory_code1}"
link2 = "www.factorycode.com/{factory_code2}"

# main.py
import webbrowser

import link_file

factory_code1 = "abc"
factory_code2 = "xyz"

# .format() returns a new string, so the result has to be assigned.
link1 = link_file.link1.format(factory_code1=factory_code1)
link2 = link_file.link2.format(factory_code2=factory_code2)

webbrowser.open(link1)
webbrowser.open(link2)

Try
# link_file.py
link_prefix = "www.factorycode.com/"
# main.py
from link_file import link_prefix
import webbrowser
factory_code1 = "abc"
factory_code2 = "xyz"
link1 = f'{link_prefix}{factory_code1}'
link2 = f'{link_prefix}{factory_code2}'
webbrowser.open(link1)
webbrowser.open(link2)

This is what functions are for.
import webbrowser

def open_link(code):
    link = f"www.factorycode.com/{code}"
    webbrowser.open(link)

codes = ['abc', 'xyz', 'def', 'jkl']
for code in codes:
    open_link(code)
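Combining the two answers, one tidy arrangement (a sketch, assuming the same factorycode.com URL scheme as above) keeps the template and the helper in link_file.py, so main.py only deals with codes:

# link_file.py
import webbrowser

LINK_TEMPLATE = "www.factorycode.com/{code}"

def open_factory_page(code):
    # Fill the template at call time and hand the URL to the browser.
    webbrowser.open(LINK_TEMPLATE.format(code=code))

# main.py
from link_file import open_factory_page

for code in ["abc", "xyz"]:
    open_factory_page(code)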

Related

How to get the text in one of the divs? (HTML)

I am writing my bot, which so far has to get the text from a div on one page and put it in a variable, but this does not work and the variable always remains empty. How can I extract it?
import telebot
import requests
import lxml.html

bot = telebot.TeleBot('')

@bot.message_handler(content_types=['text'])
def get_text_messages(message):
    api = requests.get("https://slovardalja.net/word.php?wordid=21880")
    tree = lxml.html.document_fromstring(api.text)
    text_original = tree.xpath('/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[2]/index/div[2]/p[1]/strong/text()')
    print(text_original)
    bot.send_message(message.chat.id, str(text_original))

bot.polling(none_stop=True, interval=0)
https://slovardalja.net/word.php?wordid=21880
I think this code should get the word "ОЛЕКВАС". I copied the path to it and added /text(), but it doesn't work.
I have no Cyrillic on my system, but with a shorter XPath and text_content() it prints something in the shell; hopefully this helps:
api = requests.get("https://slovardalja.net/word.php?wordid=21880")
tree = lxml.html.document_fromstring(api.text)
text_original = tree.xpath('//div[@align="justify"]/p/strong')
print(text_original[0].text_content())
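If the Cyrillic text still comes out mangled, one common fix (an assumption about this page's encoding handling, not verified against the site) is to parse the raw bytes so that lxml reads the document's own charset declaration instead of relying on requests' guessed text encoding:

import requests
import lxml.html

api = requests.get("https://slovardalja.net/word.php?wordid=21880")
# Pass the bytes, not the decoded text; lxml honors the <meta charset> itself.
tree = lxml.html.document_fromstring(api.content)
text_original = tree.xpath('//div[@align="justify"]/p/strong')
print(text_original[0].text_content())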

Is there a way to detect an existing link from a text file in Python

I have code in a Jupyter notebook that uses requests to confirm whether a URL exists and then writes the output to a text file. Here is the code for that:
import requests

Instaurl = open("dictionaries/insta.txt", 'w', encoding="utf-8")
cli = ['duolingo', 'ryanair', 'mcguinness.paddy', 'duolingodeutschland', 'duolingobrasil']
exist = []
url = []
for i in cli:
    r = requests.get("https://www.instagram.com/" + i + "/")
    if r.apparent_encoding == 'Windows-1252':
        exist.append(i)
        url.append("instagram.com/" + i + "/")
# write() expects a string, so join the collected URLs first
Instaurl.write("\n".join(url))
Instaurl.close()
Let's say that inside the cli list I accidentally added the same existing username as before (duolingo, for example). Is there a way that, if requests finds a URL that is already in the text file, it would not be added to the text file again?
Thank you!
You defined a list:
cli = ['duolingo', ...]
It sounds like you would prefer to define a set:
cli = {'duolingo', ...}
That way, duplicates will be suppressed.
It happens for dups in the initial assignment, and for any duplicate cli.add(entry) you might attempt later.
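Extending that idea, here is a small sketch that also skips usernames already recorded in the text file (the filename and the already-seen check are assumptions added for illustration; the Windows-1252 test is the asker's own existence heuristic):

import requests

path = "dictionaries/insta.txt"

# Load whatever is already in the file into a set for O(1) membership tests.
try:
    with open(path, encoding="utf-8") as f:
        seen = set(line.strip() for line in f if line.strip())
except FileNotFoundError:
    seen = set()

# A set also suppresses duplicates typed into the list itself.
cli = {'duolingo', 'ryanair', 'mcguinness.paddy', 'duolingodeutschland', 'duolingobrasil'}

with open(path, 'a', encoding="utf-8") as out:
    for i in cli:
        url = "instagram.com/" + i + "/"
        if url in seen:
            continue  # already recorded, skip it
        r = requests.get("https://www.instagram.com/" + i + "/")
        if r.apparent_encoding == 'Windows-1252':
            out.write(url + "\n")
            seen.add(url)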

How to convert requests.RequestsCookieJar to string

I have a requests.cookies.RequestsCookieJar object which contains multiple cookies from different domains/paths. How can I extract a cookie string for a particular domain/path, following the rules mentioned here?
For example
>>> r = requests.get("https://stackoverflow.com")
>>> print(r.cookies)
<RequestsCookieJar[<Cookie prov=4df137f9-848e-01c3-f01b-35ec61022540 for .stackoverflow.com/>]>
# the function I expect
>>> getCookies(r.cookies, "stackoverflow.com")
"prov=4df137f9-848e-01c3-f01b-35ec61022540"
>>> getCookies(r.cookies, "meta.stackoverflow.com")
"prov=4df137f9-848e-01c3-f01b-35ec61022540"
# meta.stackoverflow.com also matches, as it is a subdomain of .stackoverflow.com
>>> getCookies(r.cookies, "google.com")
""
# r.cookies does not contain any cookie for google.com, so it returns an empty string
I think you need to work with a Python dictionary of the cookies:
def getCookies(cookie_jar, domain):
    cookie_dict = cookie_jar.get_dict(domain=domain)
    found = ['%s=%s' % (name, value) for (name, value) in cookie_dict.items()]
    return ';'.join(found)
Your example:
>>> r = requests.get("https://stackoverflow.com")
>>> getCookies(r.cookies, ".stackoverflow.com")
"prov=4df137f9-848e-01c3-f01b-35ec61022540"
NEW ANSWER
OK, so I still don't get exactly what it is you are trying to achieve.
If you want to extract the originating URL from a requests.cookies.RequestsCookieJar object (so that you could then check whether a given subdomain matches), that is, as far as I know, impossible.
However, you could of course do something like:
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-

import requests
import re

class getCookies():
    def __init__(self, url):
        self.cookiejar = requests.get(url).cookies
        self.url = url

    def check_domain(self, domain):
        try:
            base_domain = re.compile(r"(?<=\.).+\..+$").search(domain).group()
        except AttributeError:
            base_domain = domain
        if base_domain in self.url:
            print("\"prov=" + str(dict(self.cookiejar)["prov"]) + "\"")
        else:
            print("No cookies for " + domain + " in this jar!")
Then if you do:
new_instance = getCookies("https://stackoverflow.com")
You could then do:
new_instance.check_domain("meta.stackoverflow.com")
Which would give the output:
"prov=5d4fda78-d042-2ee9-9a85-f507df184094"
While:
new_instance.check_domain("google.com")
Would output:
"No cookies for google.com in this jar!"
Then, if needed, you can fine-tune the regex and create a list of URLs: first loop through the list to create many instances and save them in e.g. a list or dict; in a second loop, check another list of URLs to see whether their cookies might be present in any of the instances. A sketch of that follows.
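A rough sketch of that two-loop idea, reusing the getCookies class defined above (the URL and domain lists are hypothetical, purely for illustration):

# Hypothetical lists for illustration.
source_urls = ["https://stackoverflow.com", "https://www.python.org"]
check_domains = ["meta.stackoverflow.com", "google.com"]

# First loop: one getCookies instance per source URL.
jars = {url: getCookies(url) for url in source_urls}

# Second loop: check every domain against every jar.
for domain in check_domains:
    for url, instance in jars.items():
        instance.check_domain(domain)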
OLD ANSWER
The docs you link to explain:
items()
Dict-like items() that returns a list of name-value tuples from the jar. Allows client-code to call dict(RequestsCookieJar) and get a vanilla Python dict of key-value pairs.
I think what you are looking for is:
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-

import requests

def getCookies(url):
    r = requests.get(url)
    print("\"prov=" + str(dict(r.cookies)["prov"]) + "\"")
Now I can run it like this:
>>> getCookies("https://stackoverflow.com")
"prov=f7712c78-b489-ee5f-5e8f-93c85ca06475"
Actually, I had the same problem as you. But when I looked at the class definition:
class RequestsCookieJar(cookielib.CookieJar, MutableMapping):
I found a function called get_dict(self, domain=None, path=None).
You can simply write code like this:
raw = "rawCookide"
print(len(cookie))
mycookie = SimpleCookie()
mycookie.load(raw)
UCookie={}
for key, morsel in mycookie.items():
UCookie[key] = morsel.value
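Since this answer mentions get_dict but the snippet above parses a raw cookie string instead, here is a minimal sketch of the get_dict route itself (the domain argument is an assumption based on the earlier examples):

import requests

r = requests.get("https://stackoverflow.com")
# get_dict() can filter the jar by domain and/or path before converting it.
cookies = r.cookies.get_dict(domain=".stackoverflow.com")
cookie_string = "; ".join("%s=%s" % (k, v) for k, v in cookies.items())
print(cookie_string)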
The following code is not promised to be "forward compatible", because it accesses attributes of classes that their authors intentionally hid (kind of); however, if you must get at the attributes of a cookie, take a look here:
import requests

aresponse = requests.get('https://www.att.com')
requestscookiejar = aresponse.cookies
# _cookies is a private dict keyed by domain, then path, then cookie name.
for cdomain, cooks in requestscookiejar._cookies.items():
    for cpath, cookgrp in cooks.items():
        for cname, cattribs in cookgrp.items():
            print(cattribs.version)
            print(cattribs.name)
            print(cattribs.value)
            print(cattribs.port)
            print(cattribs.port_specified)
            print(cattribs.domain)
            print(cattribs.domain_specified)
            print(cattribs.domain_initial_dot)
            print(cattribs.path)
            print(cattribs.path_specified)
            print(cattribs.secure)
            print(cattribs.expires)
            print(cattribs.discard)
            print(cattribs.comment)
            print(cattribs.comment_url)
            print(cattribs.rfc2109)
            print(cattribs._rest)
When you only need the simple attributes of the cookies, it is usually less complicated to go the following way. This avoids RequestsCookieJar entirely: we construct a single SimpleCookie instance by reading from the response's headers attribute instead of its cookies attribute. The name SimpleCookie would seem to imply a single cookie, but that isn't the case; it can hold several. Try it out:
import http.cookies
import requests

def parse_cookies(http_response):
    cookie_grp = http.cookies.SimpleCookie()
    for h, v in http_response.headers.items():
        if 'set-cookie' in h.lower():
            # Note: splitting on ',' is fragile if an expires date
            # (which itself contains a comma) appears in the header.
            for cook in v.split(','):
                cookie_grp.load(cook)
    return cookie_grp

aresponse = requests.get('https://www.att.com')
cookies = parse_cookies(aresponse)
print(str(cookies))
You can get the list of domains in the RequestsCookieJar and then dump the cookies for each domain with the following code:
import requests

response = requests.get("https://stackoverflow.com")
cjar = response.cookies
for domain in cjar.list_domains():
    print(f'Cookies for {domain}: {cjar.get_dict(domain=domain)}')
Outputs:
Cookies for .stackoverflow.com: {'prov': 'efe8c1b7-ddbd-4ad5-9060-89ea6c29479e'}
In this example only one domain is listed; the output would have one line per domain if the jar held cookies for several.
For many use cases, the cookie jar can be serialized by simply ignoring domains, by calling:
dCookies = cjar.get_dict()
We can easily extract a cookie string for a particular domain/path using functions already available in the requests library:
import requests
from requests.models import Request
from requests.cookies import get_cookie_header
session = requests.session()
r1 = session.get("https://www.google.com")
r2 = session.get("https://stackoverflow.com")
cookie_header1 = get_cookie_header(session.cookies, Request(method="GET", url="https://www.google.com"))
# '1P_JAR=2022-02-19-18; NID=511=Hz9Mlgl7DtS4uhTqjGOEolNwzciYlUtspJYxQ0GWOfEm9u9x-_nJ1jpawixONmVuyua59DFBvpQZkPzNAeZdnJjwiB2ky4AEFYVV'
cookie_header2 = get_cookie_header(session.cookies, Request(method="GET", url="https://stackoverflow.com"))
# 'prov=883c41a4-603b-898c-1d14-26e30e3c8774'
Request is used to prepare a PreparedRequest, which is sent to the server.
What you need is the get_dict() method:
import json

import requests

a_session = requests.Session()
a_session.get('https://google.com/')
session_cookies = a_session.cookies
cookies_dictionary = session_cookies.get_dict()
# Now just print it or convert to JSON
as_string = json.dumps(cookies_dictionary)
print(cookies_dictionary)

How to extract path names without including file names

There is a URL like the following:
original = 'https://dev.s3.amazonaws.com/production/uploads/2017/11/filename.jpg'
I want to extract only /production/uploads/2017/11 from this.
I can extract just the file name, and I can extract the path including the file name; please tell me if there is a good way to extract the path excluding the file name.
original_image_name = original.split('/')[-1] # 'filename.jpg'
from urllib.parse import urlparse
original_image_url = urlparse(original)
original_image_path = original_image_url.path # '/production/uploads/2017/11/filename.jpg'
You could use:
import os
os.path.dirname(original_image_path)
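Putting the two steps together (posixpath.dirname is used here instead of os.path.dirname so the result does not depend on the host OS path separator; the URL is the one from the question):

from urllib.parse import urlparse
import posixpath

original = 'https://dev.s3.amazonaws.com/production/uploads/2017/11/filename.jpg'
path = urlparse(original).path        # '/production/uploads/2017/11/filename.jpg'
directory = posixpath.dirname(path)   # '/production/uploads/2017/11'
print(directory)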
A quick way would be to use regular expressions, like this one:
(http[s]?:\/\/)?([^\/\s]+)(.*)
See this for a live demo: https://regex101.com/r/2jsyiI/2
urllib has the adequate tools for this:
https://docs.python.org/3.6/library/urllib.parse.html#module-urllib.parse
You will get the expected output:
original = "https://dev.s3.amazonaws.com/production/uploads/2017/11/filename.jpg"
test = original.split("/")
test = test[3:]      # drop the scheme and domain parts
test = test[:-1]     # drop the file name
sd = '/'.join(test)
print(sd)
OUTPUT
production/uploads/2017/11

Posting to FB group with requests, allowing YouTube video to load

I made a simple Python script that posts a random YouTube video and a quote to Facebook group(s).
The problem is that it doesn't give Facebook the time to load the random video: at the moment the post shows only a bare link, but I want it to show the embedded video player.
My current code looks like this (I omitted sensitive data):
""" Song of the day script """
import facebook
import os
from pyquery import PyQuery
import requests
import random
class Sofy(object):
GROUPS = ["123", "123"]
FB_ACCESS_TOKEN = "123accesstoken"
PLAYLISTS = ["123youtubeplaylist"]
VIDEOS = []
def get_video(self):
req = requests.get("https://www.youtube.com/playlist?list={}".format(self.PLAYLISTS[0]))
pq = PyQuery(req.text)
for video in pq(".pl-video").items():
self.VIDEOS.append(video.attr("data-video-id"))
return "https://www.youtube.com/watch?v={}".format(random.choice(self.VIDEOS[-5:]))
def get_qoute(self):
pwd = os.path.dirname(os.path.realpath(__file__))
fx = pwd + '/quotes.txt'
lines = open(fx).read().splitlines()
return random.choice(lines)
def run(self):
quote = self.get_qoute()
video = self.get_video()
graph = facebook.GraphAPI(access_token=self.FB_ACCESS_TOKEN, version='2.2')
for group in self.GROUPS:
graph.put_object(group, "feed", message="{}\n Song of the day: {}".format(quote, video))
print "All done :)"
if __name__=='__main__':
sofy = Sofy()
sofy.run()
I tried doing this with Selenium but it didn't quite work as expected. Also, this way looks cleaner, but I can't figure out how to let the YouTube video load; I'm not even sure if it's possible?
It doesn't look like you're actually sharing the link correctly; it looks like you're adding the URL into the 'message' parameter.
It should be attached correctly if you specify it in the 'link' parameter instead.
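For example, a minimal sketch of the adjusted call inside run() (same graph, group, quote, and video variables as in the question; whether Facebook renders the player preview is ultimately up to Facebook):

# Pass the video URL as the post's link so Facebook can generate the
# player preview; keep the quote text in the message field.
graph.put_object(group, "feed",
                 message="{}\n Song of the day:".format(quote),
                 link=video)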
