Trying to use Mako template in python to render json data - python

I'm not understanding the documentation on using Mako templates in Python. I have a Python file with the following code:
import json, requests, urllib2
from mako.template import Template
from mako.lookup import TemplateLookup
from pylons.templating import render_mako as render
url="www.data&format=json"
response = urllib2.urlopen(url)
data = json.load(response)
return (Template("hello Luisito! Here is the data: ${data}!").render(json_data=data))
Could someone provide me with some more detail on how to render this JSON data into a webpage using the Mako template? Do I need another file with the template? If so, how?

The template has to reference the same variable name that you pass to render(); you pass json_data=data, so the template should use ${json_data}:
url="www.data&format=json"
response = urllib2.urlopen(url)
data = json.load(response)
from mako.template import Template
print(Template("hello Luisito! Here is the data: ${json_data}!").render(json_data=data))
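You don't strictly need a separate template file, but if you want one, Mako's TemplateLookup can load it from a directory. A minimal sketch, assuming a templates/ directory containing a file hello.html whose content is the line hello Luisito! Here is the data: ${json_data}! (the directory and file names here are just examples, not from the question):
# Minimal sketch: render a separate template file with TemplateLookup.
# 'templates' and 'hello.html' are illustrative names.
from mako.lookup import TemplateLookup

lookup = TemplateLookup(directories=['templates'], output_encoding='utf-8')
template = lookup.get_template('hello.html')
print(template.render_unicode(json_data=data))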

from mako.template import Template
from mako.lookup import TemplateLookup
from mako import exceptions
from mako.exceptions import RichTraceback
import json
# Sample data (the same structure that is saved in data.json and read back below):
data = {
    "Records": [
        {
            "pageName": "oracle.html",
            "seoMetaData": "A page's description, usually one or two sentences.",
            "logoImg": "./images/oracle-cloud-logo-400x336.png",
            "logoImgUrl": "http://www.oracle.com",
            "description": "Oracle Cloud is a cloud computing service offered by Oracle Corporation providing servers, storage, network, applications and services through a global network of Oracle Corporation managed data centers. The company allows these services to be provisioned on demand over the Internet. Also, we provide the list of companies that use Oracle Cloud.",
            "product": "Oracle",
            "category": "CRM & Related",
            "customerAccount": "125,000 - 150,000"
        },
        {
            "pageName": "microsoft.html",
            "seoMetaData": "A page's description, usually one or two sentences.",
            "logoImg": "./images/oracle-cloud-logo-400x336.png",
            "logoImgUrl": "http://www.microsoft.com",
            "description": "Microsoft Cloud is a cloud computing service offered by Microsoft Corporation providing servers, storage, network, applications and services through a global network of Microsoft Corporation managed data centers. The company allows these services to be provisioned on demand over the Internet. Also, we provide the list of companies that use Microsoft Cloud.",
            "product": "Microsoft",
            "category": "CRM & Related",
            "customerAccount": "200,000 - 250,000"
        }
    ]
}
mylookup = TemplateLookup(directories=['./html'], output_encoding='utf-8', encoding_errors='replace')
uri = 'base.html'
def browseLocal(webpageText, filename):
    '''Start your web browser on a local file containing the text
    with the given filename.'''
    import webbrowser, os.path
    strToFile(webpageText, filename)
    # webbrowser.open("file:///" + os.path.abspath(filename))  # elaborated for Mac.

def strToFile(text, filename):
    """Write a file with the given name and the given text."""
    output = open(filename, "w")
    output.write(text)
    output.close()

def fileToStr(fileName):  # NEW
    """Return a string containing the contents of the named file."""
    fin = open(fileName)
    contents = fin.read()
    fin.close()
    return contents
f = open("data.json", "r")
data = f.read()
jsonData = json.loads(data)
list1 = jsonData["Records"]
for val in list1:
    pageName = val["pageName"]
    seoMetaData = val["seoMetaData"]
    logoImg = val["logoImg"]
    logoImgUrl = val["logoImgUrl"]
    description = val["description"]
    product = val["product"]
    category = val["category"]
    customerAccount = val["customerAccount"]
    template = mylookup.get_template(uri)
    htmlContent = template.render_unicode(name=pageName, seoMetaData=seoMetaData, logoImg=logoImg, logoImgUrl=logoImgUrl, description=description, product=product, category=category, customerAccount=customerAccount)
    browseLocal(htmlContent, pageName)
f.close()
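For completeness, base.html (looked up from the ./html directory above) would just be an ordinary Mako template that uses those keyword names. A hypothetical minimal version, purely illustrative and not from the original answer:
<html>
<head>
    <title>${name}</title>
    <meta name="description" content="${seoMetaData}">
</head>
<body>
    <a href="${logoImgUrl}"><img src="${logoImg}" alt="${product}"></a>
    <h1>${product} (${category})</h1>
    <p>${description}</p>
    <p>Customer accounts: ${customerAccount}</p>
</body>
</html>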

Related

How to combine apispec and flask-swagger-ui to make a Swagger page?

I can't find anything on the internet about how to actually extract and utilize the OpenAPI specification generated by the apispec package to make a Swagger page.
I don't want to rely on a package that isn't actively maintained, like flask-apispec. I want to use flask-swagger-ui, apispec, and other standard/well-maintained packages only.
Here is my test app. I don't know if the APISpec is working right because the documentation doesn't tell you anything about what you do with the object, but the Flask app is functional.
from flask import Flask, request, abort
from marshmallow import Schema, fields
from apispec import APISpec
from apispec.ext.marshmallow import MarshmallowPlugin
from apispec_webframeworks.flask import FlaskPlugin
app = Flask(__name__)
spec = APISpec(
    title="Doubler",
    version="1.0.0",
    openapi_version="3.0.2",
    plugins=[FlaskPlugin(), MarshmallowPlugin()],
)

class InputSchema(Schema):
    useless_1 = fields.String(required=True, description='A string')
    useless_2 = fields.Int(missing=5, description='An integer')

class OutputSchema(Schema):
    doublyuseless_1 = fields.String(required=True)
    doublyuseless_2 = fields.Int(required=True)

inputschema = InputSchema()
outputschema = OutputSchema()

@app.route('/double', methods=['GET'])
def double():
    """A doubler.
    ---
    get:
      description: Double things
      parameters:
        schema:
          InputSchema
      responses:
        200:
          description: Double things
          content:
            application/json:
              schema: OutputSchema"""
    errors = inputschema.validate(request.args)
    if errors:
        abort(400, str(errors))
    return_dict = {}
    args = inputschema.load(request.args)
    return_dict['doublyuseless_1'] = args['useless_1']*2
    return_dict['doublyuseless_2'] = args['useless_2']*2
    return outputschema.dump(return_dict)

with app.test_request_context():
    spec.path(view=double)
UPDATE: with the following code, I now get a blank page at root with the Swagger title, but no content.
import json
import yaml
from io import StringIO
from flask_swagger_ui import get_swaggerui_blueprint

with app.test_request_context():
    spec.path(view=double)

dict_ = yaml.load(StringIO(spec.to_yaml()), Loader=yaml.SafeLoader)
print(dict_)
with open('swagger.json', 'w') as f:
    json.dump(dict_, f)

SWAGGER_URL = '/'
API_URL = 'swagger.json'

swaggerui_blueprint = get_swaggerui_blueprint(
    SWAGGER_URL,  # Swagger UI static files will be mapped to '{SWAGGER_URL}/dist/'
    API_URL,
    config={  # Swagger UI config overrides
        'app_name': "Doubler"
    },
    # oauth_config={  # OAuth config. See https://github.com/swagger-api/swagger-ui#oauth2-configuration .
    #     'clientId': "your-client-id",
    #     'clientSecret': "your-client-secret-if-required",
    #     'realm': "your-realms",
    #     'appName': "your-app-name",
    #     'scopeSeparator': " ",
    #     'additionalQueryStringParams': {'test': "hello"}
    # }
)
The solution to my original problem was quite straightforward: APISpec has both a to_dict and a to_yaml method for exporting swagger.json. My second problem was more esoteric. I needed to use a SWAGGER_URL that was not /, because for some reason that caused the page to look for the core Swagger files at URLs like http://swagger-ui.js, which obviously didn't work. Once I changed my path to /doc, I still had a white screen, but that could be fixed by hosting the files myself at /doc (which I think flask_swagger_ui was supposed to do automatically, but hey, it worked).
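As a hedged illustration of that fix (the route names and file paths below are assumptions, not an exact copy of the original app): serve the generated swagger.json from a Flask route and register the Swagger UI blueprint at /doc rather than /.
# Illustrative sketch only: serve the generated spec and mount the UI at /doc.
# The file name 'swagger.json' and the route paths are assumptions.
from flask import send_file
from flask_swagger_ui import get_swaggerui_blueprint

SWAGGER_URL = '/doc'            # UI served here instead of '/'
API_URL = '/doc/swagger.json'   # where the UI fetches the spec from

@app.route('/doc/swagger.json')
def serve_spec():
    return send_file('swagger.json')  # file written from spec.to_yaml()/to_dict()

swaggerui_blueprint = get_swaggerui_blueprint(
    SWAGGER_URL,
    API_URL,
    config={'app_name': "Doubler"},
)
app.register_blueprint(swaggerui_blueprint, url_prefix=SWAGGER_URL)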

From local Python function to Google Cloud Function

I created a Python function for an API call so I no longer have to do that in Power BI. It creates 5 XML files that are then combined into a single CSV file. I would like the function to run on Google Cloud (correct me if this is not a good idea).
I don't think it's possible to create XML files in the function (maybe it's possible to write to a bucket), but ideally I would like to skip the XML file creation and just go straight to creating the CSV.
Please find the code for generating the XML files and combining into CSV below:
offices = ['NL001', 'NL002', 'NL003', 'NL004', 'NL005']
# For each office: log in, switch office and create a separate XML file
for office in offices:
    xmlfilename = office + '.xml'
    session.service.SelectCompany(office, _soapheaders={'Header': auth_header})
    proces_url = cluster + r'/webservices/processxml.asmx?wsdl'
    proces = Client(proces_url)
    response = proces.service.ProcessXmlString(query.XML_String, _soapheaders={'Header': auth_header})
    f = open(xmlfilename, 'w')
    f.write(response)
    f.close()
Then, to CSV:
if os.path.exists('CombinedFinance.csv'):
    os.remove('CombinedFinance.csv')
else:
    print("The file does not exist")

xmlfiles = ['NL001.xml', 'NL002.xml', 'NL003.xml', 'NL004.xml', 'NL005.xml']

for xmlfile in xmlfiles:
    with open(xmlfile, encoding='windows-1252') as xml_toparse:
        tree = ET.parse(xml_toparse)
        root = tree.getroot()
    columns = [element.attrib['label'] for element in root[0]]
    columns.append('?')
    data = [[field.text for field in row] for row in root[1::]]
    df = pd.DataFrame(data, columns=columns)
    df = df.drop('?', axis=1)
    df.to_csv('CombinedFinance.csv', mode='a', header=not os.path.exists('CombinedFinance.csv'))
Any ideas?
n.b. If i can improve my code please let me know, I'm just learning all of this
EDIT: In response to some comments, the code now looks like this. When deploying to the cloud I get the following error:
ERROR: (gcloud.functions.deploy) OperationError: code=13, message=Function deployment failed due to a health check failure. This usually indicates that your code was built successfully but failed during a test execution. Examine the logs to determine the cause. Try deploying again in a few minutes if it appears to be transient.
My requirements.txt looks like this:
zeep==3.4.0
pandas
Any ideas?
import pandas as pd
import xml.etree.ElementTree as ET
from zeep import Client
import query
import authentication
import os

sessionlogin = r'https://login.twinfield.com/webservices/session.asmx?wsdl'
login = Client(sessionlogin)
auth = login.service.Logon(authentication.username, authentication.password, authentication.organisation)
auth_header = auth['header']['Header']
cluster = auth['body']['cluster']

# Use cluster to create a session:
url_session = cluster + r'/webservices/session.asmx?wsdl'
session = Client(url_session)

# Select a company for the session:
offices = ['NL001', 'NL002', 'NL003', 'NL004', 'NL005']

# For each office: log in, switch office and create a separate XML
for office in offices:
    session.service.SelectCompany(office, _soapheaders={'Header': auth_header})
    proces_url = cluster + r'/webservices/processxml.asmx?wsdl'
    proces = Client(proces_url)
    response = proces.service.ProcessXmlString(query.XML_String, _soapheaders={'Header': auth_header})
    treetje = ET.ElementTree(ET.fromstring(response))
    root = treetje.getroot()
    columns = [element.attrib['label'] for element in root[0]]
    columns.append('?')
    data = [[field.text for field in row] for row in root[1::]]
    df = pd.DataFrame(data, columns=columns)
    df = df.drop('?', axis=1)
    df.to_csv('/tmp/CombinedFinance.csv', mode='a', header=not os.path.exists('/tmp/CombinedFinance.csv'))
A few things to consider about turning a regular Python script (what you have here) into a Cloud Function:
Cloud Functions respond to events -- either an HTTP request or some other background trigger. You should think about the question "what is going to trigger my function?"
HTTP functions take in a request that corresponds to the incoming request, and must return some sort of HTTP response
The only available part of the filesystem that you can write to is /tmp. You'll have to write all files there during the execution of your function
The filesystem is ephemeral. You can't expect files to stick around between invocations. Any file you create must either be stored elsewhere (like in a GCS bucket) or returned in the HTTP response (if it's an HTTP function)
A Cloud Function has a very specific signature that you'll need to wrap your existing business logic in:
def my_http_function(request):
    # business logic here
    ...
    return "This is the response", 200

def my_background_function(event, context):
    # business logic here
    ...
    # No return necessary
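For illustration only (this is not from the original answer), here is a hedged sketch of how the asker's loop could be wrapped in an HTTP-triggered function. The query and authentication modules and the zeep/Twinfield calls come from the question and are assumed to be deployed alongside the function; the function writes only to /tmp and returns the CSV in the HTTP response.
# Hypothetical wrapper only; the Twinfield/zeep work is abbreviated to comments.
import os
import pandas as pd
import xml.etree.ElementTree as ET

def combine_finance(request):
    csv_path = '/tmp/CombinedFinance.csv'
    if os.path.exists(csv_path):
        os.remove(csv_path)  # /tmp may survive between warm invocations
    for office in ['NL001', 'NL002', 'NL003', 'NL004', 'NL005']:
        # ... SelectCompany + ProcessXmlString as in the question, build df ...
        # df.to_csv(csv_path, mode='a', header=not os.path.exists(csv_path))
        pass
    if not os.path.exists(csv_path):
        return ('No data produced', 500)
    with open(csv_path) as f:
        return (f.read(), 200, {'Content-Type': 'text/csv'})
It would then be deployed with combine_finance as the entry point and an HTTP trigger (e.g. gcloud functions deploy combine_finance --runtime python37 --trigger-http), though the exact runtime flag depends on your setup.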

Google Cloud Analyze Sentiment in JupyterLab with Python

I am using Google Cloud / JupyterLab / Python.
I'm trying to run a sample sentiment analysis, following the guide here
However, on running the example, I get this error:
AttributeError: 'SpeechClient' object has no attribute 'analyze_sentiment'
Below is the code I'm trying:
def sample_analyze_sentiment(gcs_content_uri):
    gcs_content_uri = 'gs://converted_audiofiles/Converted_Audio/200315_1633 1.txt'
    client = language_v1.LanguageServiceClient()
    type_ = enums.Document.Type.PLAIN_TEXT
    language = "en"
    document = {
        "gcs_content_uri": 'gs://converted_audiofiles/Converted_Audio/200315_1633 1.txt',
        "type": 'enums.Document.Type.PLAIN_TEXT',
        "language": 'en'
    }
    response = client.analyze_sentiment(document, encoding_type=encoding_type)
I had no problem generating the transcript using Speech-to-Text, but no success getting a document sentiment analysis!
I had no problem performing analyze_sentiment following the documentation example.
I see some issues with your code. To me it should be:
from google.cloud import language_v1
from google.cloud.language import enums
from google.cloud.language import types
def sample_analyze_sentiment(path):
    # path = 'gs://converted_audiofiles/Converted_Audio/200315_1633 1.txt'
    # If path is sent through the function it does not need to be specified inside it.
    # You can always set path = "default-path" when defining the function.
    client = language_v1.LanguageServiceClient()
    document = types.Document(
        gcs_content_uri = path,
        type = enums.Document.Type.PLAIN_TEXT,
        language = 'en',
    )
    response = client.analyze_sentiment(document)
    return response
Therefore, I have tried the previous code with a path of my own to a text file inside a bucket in Google Cloud Storage.
response = sample_analyze_sentiment("<my-path>")
sentiment = response.document_sentiment
print(sentiment.score)
print(sentiment.magnitude)
I got a successful run with sentiment score -0.5 and magnitude 1.5. I performed the run in JupyterLab with Python 3, which I assume is the setup you have.

Uploading a Video to Azure Media Services with Python SDKs

I am currently looking for a way to upload a video to Azure Media Services (AMS v3) via the Python SDKs. I have followed its instructions, and am able to connect to AMS successfully.
Example
credentials = AdalAuthentication(
    context.acquire_token_with_client_credentials,
    RESOURCE,
    CLIENT,
    KEY)

client = AzureMediaServices(credentials, SUBSCRIPTION_ID)  # Successful
I can also successfully get the details of all the videos uploaded via its portal:
for data in client.assets.list(RESOUCE_GROUP_NAME, ACCOUNT_NAME).get(0):
    print(f'Asset_name: {data.name}, file_name: {data.description}')

# Asset_name: 4f904060-d15c-4880-8c5a-xxxxxxxx, file_name: 夢想全紀錄.mp4
# Asset_name: 8f2e5e36-d043-4182-9634-xxxxxxxx, file_name: an552Qb_460svvp9.webm
# Asset_name: aef495c1-a3dd-49bb-8e3e-xxxxxxxx, file_name: world_war_2.webm
# Asset_name: b53d8152-6ecd-41a2-a59e-xxxxxxxx, file_name: an552Qb_460svvp9.webm - Media Encoder Standard encoded
However, when I tried to use the following method, it failed, since I have no idea what to pass as parameters - Link to Python SDKs:
create_or_update(resource_group_name, account_name, asset_name,
                 parameters, custom_headers=None, raw=False, **operation_config)
Therefore, I would like to ask the following questions (everything is done via the Python SDKs):
What kind of parameters does it expect?
Can a video be uploaded directly to AMS, or should it be uploaded to Blob Storage first?
Should an Asset contain only one video, or are multiple files fine?
The documentation for the REST version of that method is at https://learn.microsoft.com/en-us/rest/api/media/assets/createorupdate. This is effectively the same as the Python parameters.
Videos are stored in Azure Storage for Media Services. This is true for input assets, the assets that are encoded, and any streamed content. It all is in Storage but accessed by Media Services. You do need to create an asset in Media Services which creates the Storage container. Once the Storage container exists you upload via the Storage APIs to that Media Services created container.
Technically multiple files are fine, but there are a number of issues with doing that that you may not expect. I'd recommend using 1 input video = 1 Media Services asset. On the encoding output side there will be more than one file in the asset. Encoding output contains one or more videos, manifests, and metadata files.
I have found a way to work this out using the Python SDKs and REST; however, I am not quite sure it's proper.
Log-In to Azure Media Services and Blob Storage via Python packages
import adal
from msrestazure.azure_active_directory import AdalAuthentication
from msrestazure.azure_cloud import AZURE_PUBLIC_CLOUD
from azure.mgmt.media import AzureMediaServices
from azure.mgmt.media.models import MediaService
from azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient
Create Assets for an original file and an encoded one by passing these parameters. Example of the original-file Asset creation:
asset_name = 'asset-myvideo'
asset_properties = {
    'properties': {
        'description': 'Original File Description',
        'storageAccountName': "storage-account-name"
    }
}

client.assets.create_or_update(RESOUCE_GROUP_NAME, ACCOUNT_NAME, asset_name, asset_properties)
Upload a video to the Blob Storage container derived from the created original asset:
current_container = [data.container for data in client.assets.list(RESOUCE_GROUP_NAME, ACCOUNT_NAME).get(0) if data.name == asset_name][0]  # Get Blob Storage location

file_name = "myvideo.mp4"
blob_client = blob_service_client.get_blob_client(container=current_container, blob=file_name)

with open('original_video.mp4', 'rb') as data:
    blob_client.upload_blob(data)
print(f'Video uploaded to {current_container}')
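Note that this snippet assumes a blob_service_client was created beforehand; a minimal sketch of that setup (the connection-string variable name is a placeholder, not from the original post):
# Hypothetical setup for the blob_service_client used above;
# STORAGE_CONNECTION_STRING is a placeholder for your storage account's connection string.
blob_service_client = BlobServiceClient.from_connection_string(STORAGE_CONNECTION_STRING)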
And after that, I create a Transform, a Job, and a Streaming Locator to get the video streaming link successfully.
I was able to get this to work with the newer Python SDK. The Python documentation is mostly missing, so I constructed this mainly from the Python SDK source code and the C# examples.
azure-storage-blob==12.3.1
azure-mgmt-media==2.1.0
azure-mgmt-resource==9.0.0
adal~=1.2.2
msrestazure~=0.6.3
0) Import a lot of stuff
from azure.mgmt.media.models import Asset, Transform, Job, \
    BuiltInStandardEncoderPreset, TransformOutput, \
    JobInputAsset, JobOutputAsset, AssetContainerSas, AssetContainerPermission, \
    StreamingLocator, StreamingEndpoint
import adal
from msrestazure.azure_active_directory import AdalAuthentication
from msrestazure.azure_cloud import AZURE_PUBLIC_CLOUD
from azure.mgmt.media import AzureMediaServices
from azure.storage.blob import BlobServiceClient, ContainerClient
import datetime as dt
import time
LOGIN_ENDPOINT = AZURE_PUBLIC_CLOUD.endpoints.active_directory
RESOURCE = AZURE_PUBLIC_CLOUD.endpoints.active_directory_resource_id
# AzureSettings is a custom NamedTuple
1) Log in to AMS:
def get_ams_client(settings: AzureSettings) -> AzureMediaServices:
    context = adal.AuthenticationContext(LOGIN_ENDPOINT + '/' +
                                         settings.AZURE_MEDIA_TENANT_ID)
    credentials = AdalAuthentication(
        context.acquire_token_with_client_credentials,
        RESOURCE,
        settings.AZURE_MEDIA_CLIENT_ID,
        settings.AZURE_MEDIA_SECRET
    )
    return AzureMediaServices(credentials, settings.AZURE_SUBSCRIPTION_ID)
2) Create an input and an output asset:
input_asset = create_or_update_asset(
    input_asset_name, "My Input Asset", client, azure_settings)
output_asset = create_or_update_asset(
    output_asset_name, "My Output Asset", client, azure_settings)
3) Get the Container Name. (Most documentation refers to BlockBlobService, which seems to have been removed from the SDK.)
def get_container_name(client: AzureMediaServices, asset_name: str, settings: AzureSettings):
    expiry_time = dt.datetime.now(dt.timezone.utc) + dt.timedelta(hours=4)
    container_list: AssetContainerSas = client.assets.list_container_sas(
        resource_group_name=settings.AZURE_MEDIA_RESOURCE_GROUP_NAME,
        account_name=settings.AZURE_MEDIA_ACCOUNT_NAME,
        asset_name=asset_name,
        permissions=AssetContainerPermission.read_write,
        expiry_time=expiry_time
    )
    sas_uri: str = container_list.asset_container_sas_urls[0]
    container_client: ContainerClient = ContainerClient.from_container_url(sas_uri)
    return container_client.container_name
4) Upload a file to the input asset container:
def upload_file_to_asset_container(
        container: str, local_file, uploaded_file_name, settings: AzureSettings):
    blob_service_client = BlobServiceClient.from_connection_string(settings.AZURE_MEDIA_STORAGE_CONNECTION_STRING)
    blob_client = blob_service_client.get_blob_client(container=container, blob=uploaded_file_name)
    with open(local_file, 'rb') as data:
        blob_client.upload_blob(data)
5) Create a transform (in my case, using the adaptive streaming preset):
def get_or_create_transform(
        client: AzureMediaServices,
        transform_name: str,
        settings: AzureSettings):
    transform_output = TransformOutput(preset=BuiltInStandardEncoderPreset(preset_name="AdaptiveStreaming"))
    transform: Transform = client.transforms.create_or_update(
        resource_group_name=settings.AZURE_MEDIA_RESOURCE_GROUP_NAME,
        account_name=settings.AZURE_MEDIA_ACCOUNT_NAME,
        transform_name=transform_name,
        outputs=[transform_output]
    )
    return transform
6) Submit the Job:
def submit_job(
        client: AzureMediaServices,
        settings: AzureSettings,
        input_asset: Asset,
        output_asset: Asset,
        transform_name: str,
        correlation_data: dict) -> Job:
    job_input = JobInputAsset(asset_name=input_asset.name)
    job_outputs = [JobOutputAsset(asset_name=output_asset.name)]
    job: Job = client.jobs.create(
        resource_group_name=settings.AZURE_MEDIA_RESOURCE_GROUP_NAME,
        account_name=settings.AZURE_MEDIA_ACCOUNT_NAME,
        job_name=f"test_job_{UNIQUENESS}",
        transform_name=transform_name,
        parameters=Job(input=job_input,
                       outputs=job_outputs,
                       correlation_data=correlation_data)
    )
    return job
7) Then I get the URLs after Event Grid has told me the job is done:
# side-effect warning: this starts the streaming endpoint $$$
def get_urls(client: AzureMediaServices, output_asset_name: str,
             locator_name: str, settings: AzureSettings):
    try:
        locator: StreamingLocator = client.streaming_locators.create(
            resource_group_name=settings.AZURE_MEDIA_RESOURCE_GROUP_NAME,
            account_name=settings.AZURE_MEDIA_ACCOUNT_NAME,
            streaming_locator_name=locator_name,
            parameters=StreamingLocator(
                asset_name=output_asset_name,
                streaming_policy_name="Predefined_ClearStreamingOnly"
            )
        )
    except Exception as ex:
        print("ignoring existing")

    streaming_endpoint: StreamingEndpoint = client.streaming_endpoints.get(
        resource_group_name=settings.AZURE_MEDIA_RESOURCE_GROUP_NAME,
        account_name=settings.AZURE_MEDIA_ACCOUNT_NAME,
        streaming_endpoint_name="default")

    if streaming_endpoint:
        if streaming_endpoint.resource_state != "Running":
            client.streaming_endpoints.start(
                resource_group_name=settings.AZURE_MEDIA_RESOURCE_GROUP_NAME,
                account_name=settings.AZURE_MEDIA_ACCOUNT_NAME,
                streaming_endpoint_name="default"
            )

    paths = client.streaming_locators.list_paths(
        resource_group_name=settings.AZURE_MEDIA_RESOURCE_GROUP_NAME,
        account_name=settings.AZURE_MEDIA_ACCOUNT_NAME,
        streaming_locator_name=locator_name
    )
    return [f"https://{streaming_endpoint.host_name}{path.paths[0]}" for path in paths.streaming_paths]

What is the code to create an album using the FB Graph API with the App Engine Python SDK?

I found the following code in PHP.
What is the equivalent code in Python to do that?
//At the time of writing it is necessary to enable upload support in the Facebook SDK, you do this with the line:
$facebook->setFileUploadSupport(true);

//Create an album
$album_details = array(
    'message' => 'Album desc',
    'name' => 'Album name'
);
$create_album = $facebook->api('/me/albums', 'post', $album_details);

//Get album ID of the album you've just created
$album_uid = $create_album['id'];

//Upload a photo to album of ID...
$photo_details = array(
    'message' => 'Photo message'
);
$file = 'app.jpg'; //Example image file
$photo_details['image'] = '#' . realpath($file);
$upload_photo = $facebook->api('/'.$album_uid.'/photos', 'post', $photo_details);
As the Facebook guys wrote here, they will no longer support the Python Facebook SDK, so it is better to make requests via native Python tools.
Creating the album:
import urllib,urllib2
access_token = "XXXXXXXXXXXXXXXXXXXXXXXXXXX"
path = "me/albums"
post_args = {'access_token':access_token,'name':"Test Album5", 'message':"Test Album 5"}
post_data = urllib.urlencode(post_args)
file = urllib2.urlopen("https://graph.facebook.com/" + path + "?" , post_data)
response = file.read()
>>>response
'{"id":"XXXXXX702571"}'
Uploading image:
I didn't find a short way to send multipart/form-data using urllib2, so I used the example from this answer: https://stackoverflow.com/a/6843405/592737
import pycurl
import cStringIO
url = 'https://graph.facebook.com/ALBUM_ID/photos'
file ='/path/to/img.jpg'
response = cStringIO.StringIO()
c = pycurl.Curl()
values = [
    ('file', (c.FORM_FILE, file)),
    ('access_token', access_token),
    ('message', 'Image Message'),
]
c.setopt(c.POST, 1)
c.setopt(c.URL,url)
c.setopt(c.HTTPPOST, values)
#c.setopt(c.VERBOSE, 1)
c.setopt(c.WRITEFUNCTION, response.write)
c.perform()
c.close()
>>>response.getvalue()
{"id":"XXXXXX07961"}
But if you're using some fork of the Facebook python-sdk (like https://github.com/pythonforfacebook/facebook-sdk), you can do it in a shorter way:
import facebook
access_token = "XXXXXXXXXXXXXXXXXXXXXXXX"
graph = facebook.GraphAPI(access_token)
resp = graph.put_object("me", "albums", name="Test Album",message="Test description")
graph.put_photo(open('/path/to/img.jpg'), 'Look at this cool photo!', resp['id'])
>>> _
{'id': '4394545113756'}
Not supported by Facebook, but you should consider http://code.google.com/p/gae-simpleauth/ for the OAuth piece.
Then, as other answers imply, use Python libs like urllib2 to make the graph calls (and possibly simplejson to interpret the responses).
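For illustration only (not from the original answer), a minimal sketch of such a call with urllib2 and simplejson; the access token is assumed to come from your OAuth flow (e.g. gae-simpleauth):
# Hypothetical sketch: create an album via the Graph API with urllib2 and
# parse the response with simplejson. The access token comes from your OAuth flow.
import urllib, urllib2
import simplejson

def create_album(access_token, name, message):
    post_data = urllib.urlencode({
        'access_token': access_token,
        'name': name,
        'message': message,
    })
    response = urllib2.urlopen("https://graph.facebook.com/me/albums", post_data)
    return simplejson.loads(response.read())  # e.g. {'id': '...'}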
