Get App Engine logs with the help of remote_api - python

I am trying to get my appengine application logs from remote. I am using remote_api, I tried with appcfg but I discarded it because it has a limit on the download/buffer so I can't download all the logs.
Now I am using the logservice, but if I use it in my code it doesn't return anything. Here is my code:
import time
import urllib2
from google.appengine.ext.remote_api import remote_api_stub
from google.appengine.api.logservice import logservice
import getpass
import base64
import os
from appcfg import *
import subprocess

# Environment variables the remote_api / logservice stubs expect when they
# run outside a real App Engine runtime.
os.environ['HTTP_X_APPENGINE_TASKRETRYCOUNT'] = '1'
os.environ["SERVER_SOFTWARE"] = "Developement"
os.environ['HTTP_HOST'] = 'unitTest'
os.environ['CURRENT_MODULE_ID'] = 'default'
os.environ['CURRENT_VERSION_ID'] = '1.0'

email_address = "********"
application_url = "myappid.appspot.com"


def auth_func():
    """Return (email, password) credentials for ConfigureRemoteApi."""
    global email_address
    return (email_address, getpass.getpass('Password:'))


def aut():
    """Authenticate against the remote app, then fetch its request logs."""
    app_name = "myappid.appspot.com"
    # The unused placeholder lambda from the original was removed; the real
    # credential callback is auth_func above.
    remote_api_stub.ConfigureRemoteApi(None, '/_ah/remote_api', auth_func, app_name)
    print("successfully authenticated")
    fetch_logs()


def fetch_logs():
    """Print the client IP of every request log up to the current time."""
    end_time = time.time()
    print("starting")
    for req_log in logservice.fetch(end_time=end_time, offset=None,
                                    minimum_log_level=logservice.LOG_LEVEL_INFO,
                                    include_app_logs=True,
                                    include_incomplete=True):
        print(req_log.ip)


aut()
It successfully connects to my app and makes the logservice.fetch() call, but it returns an empty object... why?

Related

How can i reference variable in lambda to AWS-RunShellScript command

I am trying to pass a variable account_id to shell script inside a lambda function, this lambda will run that script on instances for that account
import sys
import logging
import datetime
import boto3
import botocore
import time
import json
from os import getenv

region = 'ap-southeast-2'
boto3.setup_default_session(region_name=region)

# Account whose instances the shell script should target.
account_id = 123454


def handler(event, context):
    """Lambda entry point: run AWS-RunShellScript on the listed instances.

    Sends an SSM send_command targeting the instance IDs and returns the
    API response (None when the instance list is empty).
    """
    client = boto3.client('ssm')
    # Instance IDs must be strings for the SSM API (and a leading-zero
    # integer literal is a syntax error in Python 3 anyway).
    instance_name_list = ['12345678890', '0000000000']
    if instance_name_list:
        try:
            response = client.send_command(
                Targets=[
                    {
                        'Key': 'InstanceIds',
                        'Values': instance_name_list
                    },
                ],
                DocumentName='AWS-RunShellScript',
                Parameters={
                    'commands': [
                        # f-string so the module-level account_id is
                        # interpolated into the shell command.
                        f'echo {account_id}',
                    ]
                }
            )
            return response
        except botocore.exceptions.ClientError as err:
            logging.error("send_command failed: %s", err)
            raise
What is the correct way of achieving this ?
You just need to use string interpolation.
f'echo {account_id}'

Passing AWS Credentials in Python Script

I have a python script that gets called by a PHP. The user that invokes this php script is apache and hence, the python file also gets invoked by apache. So, it gives "Unable to locate credentials ". I've set the default credentials via awscli and when I invoke the python script as root, it works.
This is my line of code :
client = boto3.client('ses', region_name=awsregion, aws_access_key_id='AJHHJHJHJ', aws_secret_access_key='asdasd/asdasd/asd')
But, this gives "Invalid Syntax" Error. So, I tried this :
client = boto3.Session(aws_access_key_id='ASDASD', aws_secret_access_key='asd/asdasd/asdasd')
client = boto3.client('ses', region_name=awsregion, aws_access_key_id='ASDASD', aws_secret_access_key='asd/asdasd/asdasd')
Gives the same error as above. Weird thing is that this same thing is mentioned in the documentation. Even though it's not recommended, it should work.
Can somebody help me in fixing this?
Did you ever get this resolved? Here is how I connect to boto3 in my Python scripts:
import boto3
from botocore.exceptions import ClientError
import re
from io import BytesIO
import gzip
import datetime
import dateutil.parser as dparser
from datetime import datetime
import tarfile
import requests
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job

## Needed glue stuff
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
##

## currently this will run for everything that is in the staging directory of omniture
# set needed parms
myProfileName = 'MyDataLake'
dhiBucket = 'data-lake'

# create boto3 session
try:
    # The original line had stray keyword-argument text pasted after the
    # closing parenthesis (a syntax error); only the three real arguments
    # remain here.
    session = boto3.Session(aws_access_key_id='aaaaaaaaaaaa',
                            aws_secret_access_key='abcdefghijklmnopqrstuvwxyz',
                            region_name='us-east-1')
    s3 = session.resource('s3')  # establish connection to s3
except Exception as conne:
    print("Unable to connect: " + str(conne))
    errtxt = requests.post("https://errorcapturesite",
                           data={'message': 'Unable to connect to : ' + myProfileName,
                                 'notify': True, 'color': 'red'})
    print(errtxt.text)
    sys.exit()

python map drive programmatically in Windows 10

I'm trying to map a remote path on Windows 10 for several hours but I don't get it to work. At first I tried it with WNetAddConnection2, but no matter what credentials or flags I use, when I type net use the mapped drive always has the status "not available".
Manually i can map drives without problems, I only have problems when i map the drive programmatically.
import win32wnet
import win32netcon

# Describe the network resource to map: a connectable disk share on the
# global network, bound to local drive letter Z:.
resource = win32wnet.NETRESOURCE()
resource.dwScope = win32netcon.RESOURCE_GLOBALNET
resource.dwType = win32netcon.RESOURCETYPE_DISK
resource.dwUsage = win32netcon.RESOURCEUSAGE_CONNECTABLE
resource.lpLocalName = 'Z:'
resource.lpRemoteName = '\\\\192.168.178.46\\Test'

# 25 is the flag combination described in the post (interactive + prompt);
# credentials are None so Windows prompts for them.
win32wnet.WNetAddConnection2(resource, None, None, 25)
The 25 is a flag set of interactive and prompt. I don't get any errors and the drive is listed when i type net use, but the status is always not available and the drive is not visible under workstation.
After that I tried NetUseAdd:
import win32net
# NOTE(review): level-3 NetUseAdd with explicit credentials mapping
# \\192.168.178.46\Test to Z:. The meaning of 'status', 'flags' and
# 'asg_type' comes from the Win32 USE_INFO_3 structure — verify values
# against the NetUseAdd documentation. As the post notes, this call
# succeeds but the mapping is not visible to the interactive session.
win32net.NetUseAdd(None, 3, {'remote': r'\\192.168.178.46\Test',
'local': 'Z:', 'username': 'Admin', 'password': '123',
'status': 0, 'flags': 1, 'asg_type': 0})
It runs successfully but net use doesn't list anything and no mapped drives are visible under workstation.
A solution without subprocess would be nice. Can someone help please?
EDIT: Now I understand why it doesn't work. The app is running in admin context and I'm currently logged in as non-admin. This behaviour is explained here: https://superuser.com/questions/495370/why-isnt-a-mapped-drive-available-under-an-elevated-cmd-prompt-but-is-under-a-r
Is it possible to run the app as admin but the WNetAddConnection2 method as current user??
EDIT 2: Following the instructions from eryksun i came up with this:
import ctypes
from win32security import TOKEN_IMPERSONATE, TOKEN_ALL_ACCESS
from win32process import GetWindowThreadProcessId
from win32api import OpenProcess
from win32security import OpenProcessToken
from win32security import ImpersonateLoggedOnUser
from win32security import RevertToSelf
# Locate the shell (Explorer) window: its owning process runs as the
# interactive, non-elevated user whose token we want to impersonate.
user32 = ctypes.WinDLL('user32', use_last_error=True);
user32.GetShellWindow.restype = ctypes.c_void_p
handle = user32.GetShellWindow()
threadId, processId = GetWindowThreadProcessId(handle)
# NOTE(review): this first attempt fails — TOKEN_ALL_ACCESS on the shell
# process and TOKEN_IMPERSONATE alone on its token are the wrong access
# rights here; the working variant below uses PROCESS_QUERY_INFORMATION
# and TOKEN_QUERY | TOKEN_DUPLICATE.
handle_op = OpenProcess(TOKEN_ALL_ACCESS, True, processId)
handle_opt = OpenProcessToken(handle_op, TOKEN_IMPERSONATE)
ImpersonateLoggedOnUser(handle_opt) # throws access denied error
SOLUTION:
import ctypes
import win32wnet
import win32netcon
from win32process import GetWindowThreadProcessId
from win32api import OpenProcess
from win32security import OpenProcessToken, ImpersonateLoggedOnUser, RevertToSelf, TOKEN_QUERY, TOKEN_DUPLICATE
from win32con import PROCESS_QUERY_INFORMATION

# The shell window's process runs as the interactive (non-elevated) user,
# so impersonating its token lets this elevated process map the drive in
# the user's session.
user32 = ctypes.WinDLL('user32', use_last_error=True)
user32.GetShellWindow.restype = ctypes.c_void_p
handle = user32.GetShellWindow()
threadId, processId = GetWindowThreadProcessId(handle)
handle_op = OpenProcess(PROCESS_QUERY_INFORMATION, False, processId)
handle_opt = OpenProcessToken(handle_op, TOKEN_QUERY | TOKEN_DUPLICATE)
ImpersonateLoggedOnUser(handle_opt)
try:
    nr = win32wnet.NETRESOURCE()
    nr.dwScope = win32netcon.RESOURCE_GLOBALNET
    # was the undefined name DISK in the original snippet
    nr.dwType = win32netcon.RESOURCETYPE_DISK
    nr.dwUsage = win32netcon.RESOURCEUSAGE_CONNECTABLE
    nr.lpLocalName = 'Z:'
    nr.lpRemoteName = '\\\\192.168.178.46\\Test'
    win32wnet.WNetAddConnection3(None, nr, None, None, 25)
except Exception:
    print("Unexpected error...")
finally:
    # Always drop the impersonation, even if the mapping raised.
    RevertToSelf()
# NOTE(review): fragment from another answer — drive, networkPath, user and
# password are defined by the caller; `drive` is the local letter (e.g. X:)
# and networkPath is the UNC path as described in the surrounding text.
win32wnet.WNetAddConnection2(win32netcon.RESOURCETYPE_DISK, drive,
networkPath, None, user, password)
Drive is the local drive you want to map the network drive to, e.g. X:\
For networkpath add \\ in the beginning of the path, e.g. \\\\networkpath02 if you can access the path with \\networkpath02 in explorer.

Appengine logservice with remote_api

I am trying to get my appengine application logs from remote.
I am using remote_api, I tried with appcfg but I discarded it because it has a limit on the download/buffer so I can't download all the logs.
Now I am using the logservice, but if I use it in my code it doesn't return anything.
Here is my code:
import time
import urllib2
from google.appengine.ext.remote_api import remote_api_stub
from google.appengine.api.logservice import logservice
import getpass
import base64
import os
from appcfg import *
import subprocess

# Environment variables the remote_api / logservice stubs expect when they
# run outside a real App Engine runtime.
os.environ['HTTP_X_APPENGINE_TASKRETRYCOUNT'] = '1'
os.environ["SERVER_SOFTWARE"] = "Developement"
os.environ['HTTP_HOST'] = 'unitTest'
os.environ['CURRENT_MODULE_ID'] = 'default'
os.environ['CURRENT_VERSION_ID'] = '1.0'

email_address = "iacopo#indiegala.com"
application_url = "store-indiegala.appspot.com"


def auth_func():
    """Return (email, password) credentials for ConfigureRemoteApi."""
    global email_address
    return (email_address, getpass.getpass('Password:'))


def aut():
    """Authenticate against the remote app, then fetch its request logs."""
    app_name = "store-indiegala.appspot.com"
    # The unused placeholder lambda from the original was removed; the real
    # credential callback is auth_func above.
    remote_api_stub.ConfigureRemoteApi(None, '/_ah/remote_api', auth_func, app_name)
    print("successfully authenticated")
    fetch_logs()


def fetch_logs():
    """Print the client IP of every request log up to the current time."""
    end_time = time.time()
    print("starting")
    for req_log in logservice.fetch(end_time=end_time, offset=None,
                                    minimum_log_level=logservice.LOG_LEVEL_INFO,
                                    include_app_logs=True,
                                    include_incomplete=True):
        print(req_log.ip)


aut()
It successfully connects to my app and makes the logservice.fetch() call, but it returns an empty object... why?
Go to your logs in the App Engine admin and make sure you have the right module and version. They can be found in each log entry, for example:
2015-01-24 21:58:43.425 / active start=2015-01-24,13:57:36.773 AppEngine-Google; (+http://code.google.com/appengine) module=default version=baseline
Becomes:
import os

# Match the module/version shown in the App Engine admin log entries so
# logservice.fetch() reads from the right log stream.
os.environ["CURRENT_MODULE_ID"] = "default"
# The original line ended with a stray backtick, which is a syntax error.
os.environ['CURRENT_VERSION_ID'] = "baseline"

CherryPy Logging: How do I configure and use the global and application level loggers?

I'm having trouble with logging. I'm running CherryPy 3.2 and I've been reading through the docs here, but haven't found any examples of how to configure a local log file for output and how to write to it.
Raspberry.py:
import socket
import sys
import cherrypy

app_roots = {
    # Sean's laptop dev environment.
    "mylaptop": "/home/src/local-mydomain.com/py",
    # Hosted dev environment.
    "mydomain.com": "/home/dev/src/py"
}

hostname = socket.gethostname()
if hostname not in app_roots:
    # cherrypy.log is usable before any app is mounted; the original's
    # cherrypy.tree.mount().log call does not yield a working logger.
    cherrypy.log("The following hostname does not have an app_root entry in raspberry.py. Exiting early.")
    sys.exit()

sys.stdout = sys.stderr
sys.path.append(app_roots[hostname])

import os
os.chdir(app_roots[hostname])  # was: app_root (undefined name)

# Setup for raspberry application logging.
import datetime
today = datetime.datetime.today()
# The original assigned to a bare, undefined `log`; the log files belong in
# the global CherryPy configuration.
cherrypy.config.update({
    'log.access_file': "{0}/{1}.raspberry.access.log".format(
        app_roots[hostname], today.strftime("%Y%m%d-%H%M")),
    'log.error_file': "{0}/{1}.raspberry.error.log".format(
        app_roots[hostname], today.strftime("%Y%m%d-%H%M")),
})

# Testing logger
cherrypy.log("{0} -- Logger configured".format(today.strftime("%Y%m%d-%H%M%S")))

import atexit
cherrypy.config.update({'environment': 'embedded'})
if cherrypy.__version__.startswith('3.0') and cherrypy.engine.state == 0:
    cherrypy.engine.start(blocking=False)
    atexit.register(cherrypy.engine.stop)

from web.controllers.root import RaspberryRequestHandler
application = cherrypy.Application(RaspberryRequestHandler(), script_name=None, config=None)
UPDATE: Here's the code block that I ended up going with.
app_roots = {
    # Sean's laptop dev environment.
    "mylaptop": "/home/src/local-plottools.com/py",
    # Hosted dev environment.
    "myDomain": "/home/dev/src/py"
}

import socket
hostname = socket.gethostname()
import cherrypy
import sys

if hostname not in app_roots:
    cherrypy.log("The hostname {0} does not have an app_root entry in {1}. Exiting early.".format(hostname, __file__))
    sys.exit()

sys.stdout = sys.stderr
sys.path.append(app_roots[hostname])

import os
os.chdir(app_roots[hostname])

from web.controllers.root import RaspberryRequestHandler

cherrypy.config.update({
    'log.access_file': "{0}/cherrypy-access.log".format(app_roots[hostname]),
    'log.error_file': "{0}/cherrypy.log".format(app_roots[hostname]),
    "server.thread_pool": 10
})

# special case, handling debug sessions when quickstart is needed.
if __name__ == "__main__":
    cherrypy.config.update({
        'log.screen': True,
        "server.socket_port": 8000
    })
    cherrypy.quickstart(RaspberryRequestHandler())
    sys.exit()

# This configuration is needed for running under mod_wsgi. See here: http://tools.cherrypy.org/wiki/ModWSGI
cherrypy.config.update({'environment': 'embedded'})

# Route the application's own log through a rotating file handler, formatted
# the way CherryPy formats its other logs.
applicationLogName = "{0}/raspberry.log".format(app_roots[hostname])
from logging import handlers
applicationLogFileHandler = handlers.RotatingFileHandler(applicationLogName, 'a', 10000000, 1000)
import logging
applicationLogFileHandler.setLevel(logging.DEBUG)
from cherrypy import _cplogging
applicationLogFileHandler.setFormatter(_cplogging.logfmt)
cherrypy.log.error_log.addHandler(applicationLogFileHandler)
application = cherrypy.Application(RaspberryRequestHandler(), None)
Simplifying a bit:
import os
import socket
import sys
import cherrypy

app_roots = {
    # Sean's laptop dev environment.
    "mylaptop": "/home/src/local-mydomain.com/py",
    # Hosted dev environment.
    "mydomain.com": "/home/dev/src/py"
}

hostname = socket.gethostname()
if hostname not in app_roots:
    cherrypy.log("The hostname %r does not have an app_root entry in "
                 "raspberry.py. Exiting early." % hostname)
    sys.exit()

sys.path.append(app_roots[hostname])
os.chdir(app_roots[hostname])  # was: app_root (undefined name)

cherrypy.config.update({
    'environment': 'embedded',
    'log.access_file': "{0}/raspberry.access.log".format(app_roots[hostname]),
    'log.error_file': "{0}/raspberry.error.log".format(app_roots[hostname]),
})

from web.controllers.root import RaspberryRequestHandler
application = cherrypy.tree.mount(RaspberryRequestHandler(), '/')
# Insert log changes here
cherrypy.engine.start()
If you want different logs per day, use a RotatingFileHandler as described at http://www.cherrypy.org/wiki/Logging#CustomHandlers The important point I think you're missing is that you should muck about with app.log only after you've instantiated your app (e.g. via tree.mount(), as above), but before engine.start. That is, for the error log:
# NOTE(review): `handlers`, `DEBUG` and `_cplogging` are imported earlier in
# the answer (logging.handlers, logging.DEBUG, cherrypy._cplogging).
application = cherrypy.tree.mount(RaspberryRequestHandler(), '/')
log = application.log
# Clear the built-in file logging so our rotating handler is the only sink.
log.error_file = ""
# Make a new RotatingFileHandler for the error log.
fname = "{0}/raspberry.error.log".format(app_roots[hostname])
h = handlers.RotatingFileHandler(fname, 'a', 10000000, 1000)
h.setLevel(DEBUG)
h.setFormatter(_cplogging.logfmt)
log.error_log.addHandler(h)
# Handlers must be attached after tree.mount() but before engine.start().
cherrypy.engine.start()
Hope that helps...

Categories