I have a Django function that takes in a Nessus file and parses the data before saving it to the database. My Nessus file typically has about 30k rows, and saving it to the database can take as much as 2 hours. I have tried using bulk_create, but this breaks the code. I am on Django 1.11 with Postgres. Is there a way I can speed up these large inserts?
Here is my code:
def process_nessus_file(*args, **kwargs):
process_obj = kwargs.get('file')
context = kwargs.get('context')
request = kwargs.get('request')
file_obj = process_obj.first()
file_path = file_obj.file.path
context = etree.iterparse(
file_path,
events=('end', ),
tag="ReportHost"
)
total_issues = 0
detected_issues = 0
undetected_issues = 0
already_exist_issue = 0
low_risk_count = 0
medium_risk_count = 0
high_risk_count = 0
critical_risk_count = 0
low_new_issue = 0
medium_new_issue = 0
high_new_issue = 0
critical_new_issue = 0
vul_history = []
for event, elem in context:
first_identified = None
last_seen = None
host = elem.get('name')
logger.info('Processing issue for host : {}'.format(host))
for child in elem:
if child.tag == "HostProperties":
for host_prop_tags in child:
if host_prop_tags.attrib['name'] == "HOST_START":
first_identified = host_prop_tags.text
elif host_prop_tags.attrib['name'] == "HOST_END":
last_seen = host_prop_tags.text
if child.tag == "ReportItem":
main_tags = child.attrib
child_tags = dict()
for ch_tags in child:
if ch_tags.text:
tag_text = ch_tags.text.strip()
else:
tag_text = ch_tags.text
child_tags[ch_tags.tag] = tag_text
if child_tags.get('solution') and \
child_tags.get('solution') in ['n/a', 'N/A']:
child_tags['solution'] = ''
plugin_output = child_tags.get('plugin_output')
pluginid = int(main_tags.get('pluginID'))
if plugin_output and (pluginid == 10107):
if re.search(BANNER_PATTERN, plugin_output):
banner_pattern = plugin_output.replace("{}".\
format(BANNER_PATTERN), "")
banner = banner_pattern.strip()
else:
banner = ''
else:
banner = ''
risk = child_tags.get('risk_factor')
synopsis = child_tags.get('synopsis')
description = child_tags.get('description')
solution = child_tags.get('solution')
protocol = main_tags.get('protocol')
port = main_tags.get('port')
pluginname = main_tags.get('pluginName')
svcname = main_tags.get('svc_type')
try:
host_type = get_host_type(host)
user_host = check_host_exists(host, host_type)
if user_host and not NessusData.objects.filter(
plugin_id=int(pluginid), host=host,
port=int(port), name=pluginname
).exists():
try:
host_link_obj = Host.objects.get(
host=host
)
except Host.MultipleObjectsReturned:
host_link_obj = Host.objects.filter(
host=host
).first()
except Host.DoesNotExist:
host_link_obj = Host.objects.create(
host=host,
user_host=user_host
)
nessus_obj = NessusFile.objects.create(
user_host=user_host,
host_link=host_link_obj,
linked_file=file_obj,
plugin_id=int(pluginid),
risk=risk, host=host,
protocol=protocol, port=int(port),
banner=banner, name=pluginname,
svc_type=svcname,
description=description,
first_identified=first_identified,
last_seen=last_seen,
synopsis=synopsis,
plugin_output=plugin_output,
solution=solution
)
issue = "Issue with host {}, port {} and"\
" pluginID {} is added.".\
format(
nessus_obj.host, nessus_obj.port,
nessus_obj.plugin_id
)
NessusFileLog.objects.create(
linked_file=file_obj,
issue_type="new",
issue=issue
)
detected_issues = detected_issues + 1
if risk == 'Medium':
medium_new_issue = medium_new_issue + 1
elif risk == 'Low':
low_new_issue = low_new_issue + 1
elif risk == 'High':
high_new_issue = high_new_issue + 1
elif risk == 'Critical':
critical_new_issue = critical_new_issue + 1
else:
nessus_obj = NessusFile.objects.filter(
plugin_id=int(pluginid), host=host,
port=int(port), name=pluginname
).first()
if nessus_obj and not nessus_obj.last_seen:
nessus_obj.last_seen = last_seen
nessus_obj.save()
issue = "Issue with host {}, port {} and"\
" pluginID {} is already exists.".\
format(host,port, pluginid)
NessusFileLog.objects.create(
linked_file=file_obj,
issue_type="duplicate",
issue=issue
)
already_exist_issue = already_exist_issue + 1
except Exception as e:
pass
if risk == 'Medium':
medium_risk_count = medium_risk_count + 1
elif risk == 'Low':
low_risk_count = low_risk_count + 1
elif risk == 'High':
high_risk_count = high_risk_count + 1
elif risk == 'Critical':
critical_risk_count = critical_risk_count + 1
total_issues = total_issues + 1
elem.clear()
while elem.getprevious() is not None:
del elem.getparent()[0]
I have heard that using raw SQL queries will speed it up, but I can't wrap my head around the process.
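For what it's worth, the usual pattern for this kind of load is to replace the per-row .create() and .exists() calls with one key lookup up front and batched bulk_create calls. The sketch below is only a rough outline assuming the NessusFile fields already shown above (the original existence check goes through NessusData, so point the key lookup at whichever model actually holds the duplicates); note that bulk_create skips the model's save() method and signals, which is often the reason it "breaks" code that relies on them:
from django.db import transaction

BATCH_SIZE = 1000

# fetch the existing (plugin_id, host, port, name) keys once,
# instead of running one .exists() query per ReportItem
existing_keys = set(
    NessusFile.objects.values_list('plugin_id', 'host', 'port', 'name')
)
pending = []

def flush(pending):
    # write the accumulated rows in one round-trip per batch
    if pending:
        with transaction.atomic():
            NessusFile.objects.bulk_create(pending, batch_size=BATCH_SIZE)
        del pending[:]

# inside the ReportItem loop, instead of NessusFile.objects.create(...):
key = (int(pluginid), host, int(port), pluginname)
if key not in existing_keys:
    pending.append(NessusFile(
        user_host=user_host, host_link=host_link_obj,
        linked_file=file_obj, plugin_id=int(pluginid),
        risk=risk, host=host, protocol=protocol, port=int(port),
        banner=banner, name=pluginname, svc_type=svcname,
        description=description, first_identified=first_identified,
        last_seen=last_seen, synopsis=synopsis,
        plugin_output=plugin_output, solution=solution,
    ))
    existing_keys.add(key)
    if len(pending) >= BATCH_SIZE:
        flush(pending)

flush(pending)  # after the loop, write whatever is left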
Related
I'm unable to list all entries in Kaltura. An ApiException with the message "Unable to generate list. max matches value was reached" (Error: QUERY_EXCEEDED_MAX_MATCHES_ALLOWED) gets triggered.
I tried to work around this issue by setting my sessionPrivileges to disableentitlement:
class class_chk_integrity():
client = None
pagesize = 0
def __init__(self,worker_num, progress):
self.pagesize = 30
self.worker_num = worker_num
self.progress = progress
config = KalturaConfiguration(2723521)
config.serviceUrl = "https://www.kaltura.com/"
self.client = KalturaClient(config)
ks = self.client.session.start("KALTURA_ADMIN_SECRET",
"email#email.com",
KalturaPluginsCore.KalturaSessionType.ADMIN,
"KALTURA_PARTNER_ID",
432000,
"disableentitlement")
self.client.setKs(ks)
I also tried to filter based on the IDs. However, I can't manage to get filter.idNotIn to work properly.
def get_total_reg(self, cont, lastEntryIds, lastEntryCreatedAt):
filter = KalturaPluginsCore.KalturaBaseEntryFilter()
if lastEntryIds != "":
filter.idNotIn = lastEntryIds
filter.orderBy = KalturaBaseEntryOrderBy.CREATED_AT_DESC
pager = KalturaPluginsCore.KalturaFilterPager()
pageIndex = 1
entriesGot = 0
pager.pageSize = self.pagesize
pager.setPageIndex = pageIndex
result = self.client.baseEntry.list(filter, pager)
totalCount = result.totalCount
if totalCount > 10000:
totalCount = 9970
if totalCount <= 0:
cont = False
while entriesGot < totalCount:
pager.pageSize = self.pagesize
pageIndex += 1
pager.pageIndex = pageIndex
result = self.client.baseEntry.list(filter, pager)
entriesGot += len(result.objects)
for e in result.objects:
if lastEntryIds == "":
lastEntryIds.append(e.id)
else:
lastEntryIds.append(e.id)
lastEntryCreatedAt = e.createdAt
return result.totalCount, self.pagesize, cont, lastEntryIds, lastEntryCreatedAt
This is how I'm calling the functions:
if __name__ == '__main__':
try:
log = _ServiceUtils.log()
log.setup('all', 'integrity')
cont = True
lastEntryIds = []
lastEntryCreatedAt = 0
while cont is True:
kmc = class_chk_integrity(0,0)
kmc_total_reg, kmc_page_size, cont, lastEntryIds, lastEntryCreatedAt = kmc.get_total_reg(cont, lastEntryIds, lastEntryCreatedAt)
interval = 10
max_threads = math.ceil(kmc_total_reg / (interval * kmc_page_size))
# max_threads = 1
threads_list = []
print('TOTAL REG : %s | PAGE_SIZE : %s | INTERVAL : %s | THREADS : %s' % (kmc_total_reg,kmc_page_size,interval,max_threads))
progress = class_progress_thread(max_threads)
for index in range(0,max_threads):
page_ini = index * interval
page_end = index * interval + interval
progress.add_worker_progress(index,datetime.now())
threads_list.append(threading.Thread(target=thread_chk_integrity, args=(index, log, index * interval + 1,index * interval + interval,progress)))
threads_list.append(threading.Thread(target=thread_output_progress, args=(progress,max_threads)))
for thread in threads_list:
thread.start()
for thread in threads_list:
thread.join()
while not progress.stop(): time.sleep(30)
except KeyboardInterrupt:
try:
sys.exit(0)
except SystemExit:
os._exit(0)
I'd appreciate any help with this.
Thank you for your attention.
if totalCount > 10000:
totalCount = 9970
I'm curious to know why you are changing the totalCount this way.
Short answer - paging works as long as the result set is up to 10K.
To work around that, sort the result by creation date (as you did), and when you get to 10K, start with a new search where the created_at date in the filter is the last value you got in the previous search. Reset your paging of course.
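A rough sketch of that approach, assuming the same KalturaBaseEntryFilter and KalturaFilterPager objects used above. createdAtLessThanOrEqual is the filter field used to restart the window; entries that share the boundary timestamp can come back twice, so de-duplicate by id if that matters:
def iter_all_entries(client, page_size=500):
    last_created_at = None
    while True:
        filter = KalturaPluginsCore.KalturaBaseEntryFilter()
        filter.orderBy = KalturaBaseEntryOrderBy.CREATED_AT_DESC
        if last_created_at is not None:
            # restart the search at the createdAt of the last entry we saw
            filter.createdAtLessThanOrEqual = last_created_at
        pager = KalturaPluginsCore.KalturaFilterPager()
        pager.pageSize = page_size
        pager.pageIndex = 1
        exhausted = False
        # stay below the 10K ceiling for this search window
        while pager.pageIndex * page_size <= 10000:
            result = client.baseEntry.list(filter, pager)
            if not result.objects:
                exhausted = True  # this window is fully drained
                break
            for e in result.objects:
                last_created_at = e.createdAt
                yield e
            if len(result.objects) < page_size:
                exhausted = True
                break
            pager.pageIndex += 1
        if exhausted:
            break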
I am working on a Python script to add a VM to a DRS group.
Here is a PowerShell script that does the job. How can I translate it to Python?
$spec = New-Object VMware.Vim.ClusterConfigSpecEx
$spec.groupSpec = New-Object VMware.Vim.ClusterGroupSpec[] (1)
$spec.groupSpec[0] = New-Object VMware.Vim.ClusterGroupSpec
$spec.groupSpec[0].operation = "edit"
$spec.groupSpec[0].info = $DrsGroup
$spec.groupSpec[0].info.vm += $VM.ExtensionData.MoRef
$Cluster.ExtensionData.ReconfigureComputeResource_Task($spec, $true)
Actually, I am stuck on the last line because I cannot find the ReconfigureComputeResource method. My code is below:
def get_drsgroup(self, cluster_obj=None, group_name=None):
if cluster_obj is None:
cluster_obj = self.cluster_obj
if group_name:
group_list = [group for group in cluster_obj.configurationEx.group if group.name == group_name]
if group_list:
return group_list[0]
return None
def assign_vm_to_group(self):
drs_group = self.get_drsgroup(group_name="DC_VM")
my_vm = self.get_obj(
[vim.VirtualMachine],
"Test_vm",
return_all=True)
spec = vim.cluster.ConfigSpecEx()
groupspec = vim.cluster.GroupSpec()
groupspec.operation = "edit"
groupspec.info = drs_group
groupspec.info.vm += my_vm
self.cluster_obj.xxxxxxxxxxxxxx
Thanks for your help
def manage_vm_to_DRS_group(self, action=None):
#--- required objects already exist
drs_group = self.get_drsgroup(group_name=self.drs_rule_name)
my_vm = self.get_obj(
[vim.VirtualMachine],
self.vm,
return_all=True
)
#---
try:
spec = vim.cluster.ConfigSpecEx()
my_groupSpec = vim.cluster.GroupSpec()
my_groupSpec.operation = "edit"
my_groupSpec.info = drs_group
if self.state == 'present':
my_groupSpec.info.vm += my_vm
elif self.state == 'removed':
#my_groupSpec.info.vm = my_groupSpec.info.vm.remove(my_vm[0])
my_groupSpec.info.vm.remove(my_vm[0])
else:
self.module.fail_json(msg="Module failure: check module, function: assign_vm_to_group, indice: 1")
spec.groupSpec.append(my_groupSpec)
if not self.check_mode:
self.cluster_obj.ReconfigureComputeResource_Task(spec, modify=True)
except:
self.module.fail_json(msg="Module failure:Failed to assign vm '%s' to DRS group '%s'" % (
self.vm, self.drs_rule_name))
self.module.exit_json(changed=True, result="SUCCESS vm '%s' is '%s' on DRS group '%s'" % (
self.vm, self.state, self.drs_rule_name))
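If the module needs to block until the reconfiguration actually finishes rather than just firing the task, pyvmomi's task helper can wrap the call; a small sketch:
from pyVim.task import WaitForTask

task = self.cluster_obj.ReconfigureComputeResource_Task(spec, modify=True)
WaitForTask(task)  # raises if the task ends in an error state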
I have a Django application where I am trying to call FedEx's API to send out a shipping label for people wanting to send in a product for cash. When I try to make the call, though, it says there is a data validation issue with the Expiration field in the XML I am filling out. I swear this worked in the past when I formatted the date as "YYYY-MM-DD", but now it does not. I read that with FedEx you need to format the date as ISO, but that is also not passing the data validation. I am using a Python package created to help with tapping FedEx's API.
Django view function for sending API Call
def Fedex(request, quote):
label_link = ''
expiration_date = datetime.datetime.now() + datetime.timedelta(days=10)
# formatted_date = "%s-%s-%s" % (expiration_date.year, expiration_date.month, expiration_date.day)
formatted_date = expiration_date.replace(microsecond=0).isoformat()
if quote.device_type != 'laptop':
box_length = 9
box_width = 12
box_height = 3
else:
box_length = 12
box_width = 14
box_height = 3
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
## Page 411 of FedEx Dev Guide - 20.14 Email Labels
CONFIG_OBJ = FedexConfig(key=settings.FEDEX_KEY, password=settings.FEDEX_PASSWORD, account_number=settings.FEDEX_ACCOUNT,
meter_number=settings.FEDEX_METER, use_test_server=settings.USE_FEDEX_TEST)
fxreq = FedexCreatePendingShipRequestEmail(CONFIG_OBJ, customer_transaction_id='xxxxxx id:01')
fxreq.RequestedShipment.ServiceType = 'FEDEX_GROUND'
fxreq.RequestedShipment.PackagingType = 'YOUR_PACKAGING'
fxreq.RequestedShipment.DropoffType = 'REGULAR_PICKUP'
fxreq.RequestedShipment.ShipTimestamp = datetime.datetime.now()
# Special fields for the email label
fxreq.RequestedShipment.SpecialServicesRequested.SpecialServiceTypes = ('RETURN_SHIPMENT', 'PENDING_SHIPMENT')
fxreq.RequestedShipment.SpecialServicesRequested.PendingShipmentDetail.Type = 'EMAIL'
fxreq.RequestedShipment.SpecialServicesRequested.PendingShipmentDetail.ExpirationDate = formatted_date
email_address = fxreq.create_wsdl_object_of_type('EMailRecipient')
email_address.EmailAddress = quote.email
email_address.Role = 'SHIPMENT_COMPLETOR'
# RETURN SHIPMENT DETAIL
fxreq.RequestedShipment.SpecialServicesRequested.ReturnShipmentDetail.ReturnType = ('PENDING')
fxreq.RequestedShipment.SpecialServicesRequested.ReturnShipmentDetail.ReturnEMailDetail = fxreq.create_wsdl_object_of_type(
'ReturnEMailDetail')
fxreq.RequestedShipment.SpecialServicesRequested.ReturnShipmentDetail.ReturnEMailDetail.MerchantPhoneNumber = 'x-xxx-xxx-xxxx'
fxreq.RequestedShipment.SpecialServicesRequested.PendingShipmentDetail.EmailLabelDetail.Recipients = [email_address]
fxreq.RequestedShipment.SpecialServicesRequested.PendingShipmentDetail.EmailLabelDetail.Message = "Xxxxxx Xxxxxx"
fxreq.RequestedShipment.LabelSpecification = {'LabelFormatType': 'COMMON2D', 'ImageType': 'PDF'}
fxreq.RequestedShipment.Shipper.Contact.PersonName = quote.first_name + ' ' + quote.last_name
fxreq.RequestedShipment.Shipper.Contact.CompanyName = ""
fxreq.RequestedShipment.Shipper.Contact.PhoneNumber = quote.phone
fxreq.RequestedShipment.Shipper.Address.StreetLines.append(quote.address)
fxreq.RequestedShipment.Shipper.Address.City = quote.city
fxreq.RequestedShipment.Shipper.Address.StateOrProvinceCode = quote.state
fxreq.RequestedShipment.Shipper.Address.PostalCode = quote.zip
fxreq.RequestedShipment.Shipper.Address.CountryCode = settings.FEDEX_COUNTRY_CODE
fxreq.RequestedShipment.Recipient.Contact.PhoneNumber = settings.FEDEX_PHONE_NUMBER
fxreq.RequestedShipment.Recipient.Address.StreetLines = settings.FEDEX_STREET_LINES
fxreq.RequestedShipment.Recipient.Address.City = settings.FEDEX_CITY
fxreq.RequestedShipment.Recipient.Address.StateOrProvinceCode = settings.FEDEX_STATE_OR_PROVINCE_CODE
fxreq.RequestedShipment.Recipient.Address.PostalCode = settings.FEDEX_POSTAL_CODE
fxreq.RequestedShipment.Recipient.Address.CountryCode = settings.FEDEX_COUNTRY_CODE
fxreq.RequestedShipment.Recipient.AccountNumber = settings.FEDEX_ACCOUNT
fxreq.RequestedShipment.Recipient.Contact.PersonName = ''
fxreq.RequestedShipment.Recipient.Contact.CompanyName = 'Xxxxxx Xxxxxx'
fxreq.RequestedShipment.Recipient.Contact.EMailAddress = 'xxxxxx#xxxxxxxxx'
# Details of Person Who is Paying for the Shipping
fxreq.RequestedShipment.ShippingChargesPayment.PaymentType = 'SENDER'
fxreq.RequestedShipment.ShippingChargesPayment.Payor.ResponsibleParty.AccountNumber = settings.FEDEX_ACCOUNT
fxreq.RequestedShipment.ShippingChargesPayment.Payor.ResponsibleParty.Contact.PersonName = 'Xxxxx Xxxxx'
fxreq.RequestedShipment.ShippingChargesPayment.Payor.ResponsibleParty.Contact.CompanyName = 'Xxxxx Xxxxxx'
fxreq.RequestedShipment.ShippingChargesPayment.Payor.ResponsibleParty.Contact.PhoneNumber = 'x-xxx-xxx-xxxx'
fxreq.RequestedShipment.ShippingChargesPayment.Payor.ResponsibleParty.Contact.EMailAddress = 'xxxxxxx#xxxxxxxxx'
fxreq.RequestedShipment.ShippingChargesPayment.Payor.ResponsibleParty.Address.StreetLines = 'Xxxxx N. xXxxxxx'
fxreq.RequestedShipment.ShippingChargesPayment.Payor.ResponsibleParty.Address.City = 'Xxxxxxx'
fxreq.RequestedShipment.ShippingChargesPayment.Payor.ResponsibleParty.Address.StateOrProvinceCode = 'XX'
fxreq.RequestedShipment.ShippingChargesPayment.Payor.ResponsibleParty.Address.PostalCode = 'xxxxx'
fxreq.RequestedShipment.ShippingChargesPayment.Payor.ResponsibleParty.Address.CountryCode = 'US'
# Package Info
package1 = fxreq.create_wsdl_object_of_type('RequestedPackageLineItem')
package1.SequenceNumber = '1'
package1.Weight.Value = 1
package1.Weight.Units = "LB"
package1.Dimensions.Length = box_length
package1.Dimensions.Width = box_width
package1.Dimensions.Height = box_height
package1.Dimensions.Units = "IN"
package1.ItemDescription = 'Phone'
fxreq.RequestedShipment.RequestedPackageLineItems.append(package1)
fxreq.RequestedShipment.PackageCount = '1'
try:
fxreq.send_request()
label_link = str(fxreq.response.CompletedShipmentDetail.AccessDetail.AccessorDetails[0].EmailLabelUrl)
except Exception as exc:
print('Fedex Error')
print('===========')
print(exc)
print('==========')
return label_link
Error Log
Error: cvc-datatype-valid.1.2.1: '2017-11-3' is not a valid value for 'date'. cvc-type.3.1.3: The value '2017-11-3' of element 'ns0:ExpirationDate' is not valid. (Error code: -1)
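Note that the rejected value in the log is '2017-11-3', which looks like the un-padded output of the commented-out "%s-%s-%s" line; isoformat() on a datetime also includes a time component, which an element typed as date would likewise reject. A zero-padded, date-only string, assuming the same expiration_date variable, would be:
formatted_date = expiration_date.strftime('%Y-%m-%d')  # e.g. '2017-11-03'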
I have problems with Scrapy running multiple crawlers. What I want to achieve:
I have an engine running in the background that takes tasks/jobs from a MySQL database. The database is queried every 15 seconds, and if there is a new job, Scrapy should process it.
My setup is working fine so far; the last remaining problem is that my spiders (Scrapy spiders) get "stacked" in the CrawlerRunner.
Start:
def schedule():
jobs = GetJob.Job()
jobs.getJobs()
if __name__ == "__main__":
t = task.LoopingCall(schedule)
t.start(15)
reactor.run()
After GetJob.Job(), the jobs are processed here:
class ProcessJob():
def processJob(self, job):
#update job
mysql = MysqlConnector.Mysql()
db = mysql.getConnection()
cur = db.cursor()
job.status = 1
update = "UPDATE job SET status=1 WHERE id=" + str(job.id)
cur.execute(update)
db.commit()
db.close()
#Start new crawler
webspider = MySpider.MySpider(job)
#Some settings
ajaxSettings = CrawlerSettings.ajax_settings
normalSettings = CrawlerSettings.normal_settings
configure_logging()
if job.max_pages != 0:
ajaxSettings["CLOSESPIDER_PAGECOUNT"] = 0
ajaxSettings["CLOSESPIDER_ITEMCOUNT"] = job.max_pages
normalSettings["CLOSESPIDER_PAGECOUNT"] = 0
normalSettings["CLOSESPIDER_ITEMCOUNT"] = job.max_pages
#max connections
concurrent_requests = int(job.max_pages / 20)
if concurrent_requests < 1:
concurrent_requests = 10
if concurrent_requests > 500:
concurrent_requests = 500
ajaxSettings["CONCURRENT_REQUESTS"] = concurrent_requests
normalSettings["CONCURRENT_REQUESTS"] = concurrent_requests
#Ajax true or false
if job.ajax == 1:
runner = CrawlerRunner(ajaxSettings)
else:
runner = CrawlerRunner(normalSettings)
d = runner.crawl(webspider, job=job)
And here is my spider:
class MySpider(CrawlSpider):
def __init__(self, job):
#Get the hosts
self.job = job
dispatcher.connect(self.spider_closed, signals.spider_closed)
allowedDomainsPre = job.url.split(",")
allowedDomains = []
for domains in allowedDomainsPre:
parsed_uri = urlparse(domains)
domain = '{uri.netloc}'.format(uri=parsed_uri)
print domain
allowedDomains.append(domain)
self.allowed_domains = allowedDomains
self.start_urls = allowedDomainsPre
#Get job patterns
jobPatterns = job.processing_patterns.split(",")
allowedPatterns = []
deniedPatterns = []
for pattern in jobPatterns:
if '-' in pattern:
deniedPatterns.append(pattern.replace("-", ""))
else:
allowedPatterns.append(pattern)
self._rules = [
Rule(LinkExtractor(allow=(allowedPatterns), deny=(deniedPatterns)), callback=self.parse_items, follow=True)
]
self.name = job.id
self.settings = CrawlerSettings.normal_settings
def spider_closed(self, spider):
stats = spider.crawler.stats.get_stats()
itemCount = 0
try:
itemCount = stats["item_scraped_count"]
except:
print "Item count = zero"
DoneJob.DoneJob().jobDone(self.job, itemCount)
def parse_items(self, response):
item = Item()
#if the user wants a minimum description
if self.job.min_description > 0:
item['html'] = response.body
item['url'] = response.url
item['job_id'] = self.job.id
soup = BeautifulSoup(response.body, 'html.parser')
article = Document(soup.prettify()).summary()
article_soup = BeautifulSoup(article)
text = re.sub(' +', ' ', article_soup.get_text().rstrip())
text_length = len(text.split(' '))
if text_length > self.job.min_description:
return item
else:
item['html'] = response.body
item['url'] = response.url
item['job'] = {}
#Job
item['job']['id'] = self.job.id
item['job']['user_id'] = self.job.user_id
item['job']['name'] = self.job.name
item['job']['url'] = self.job.url
item['job']['api'] = self.job.api
item['job']['max_pages'] = self.job.max_pages
item['job']['crawl_depth'] = self.job.crawl_depth
item['job']['processing_patterns'] = self.job.processing_patterns
item['job']['days'] = self.job.days
item['job']['ajax'] = self.job.ajax
item['job']['min_description'] = self.job.min_description
return item
So after running two or three jobs, my spider_closed gets called multiple times instead of once as expected.
What is wrong here?
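For reference, the pattern the Scrapy docs use for hooking spider_closed is the from_crawler classmethod, which connects the handler through that crawler's own signal manager instead of the global dispatcher used in __init__ above (the dispatcher.connect line would then be removed). A minimal sketch, keeping the same MySpider and its job argument:
from scrapy import signals
from scrapy.spiders import CrawlSpider

class MySpider(CrawlSpider):

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        spider = super(MySpider, cls).from_crawler(crawler, *args, **kwargs)
        # scoped to this crawler, so only this crawl's close event fires it
        crawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)
        return spider

    def spider_closed(self, spider):
        stats = spider.crawler.stats.get_stats()
        DoneJob.DoneJob().jobDone(self.job, stats.get("item_scraped_count", 0))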
I get this error: invalid syntax in my "if" statement, and I really can't figure out why. Can anyone help me? I'm using Python 3.2.
Here is the part of my code with the error:
L = list()
LT = list()
tn = 0
players = 0
newplayer = 0
newplayerip = ""
gt = "start"
demsg = "start"
time = 1
status = 0
day = 1
conclient = 1
print("DONE! The UDP Server is now started and Waiting for client's on port 5000")
while 1:
try:
data, address = server_socket.recvfrom(1024)
if not data: break
################### reciving data! ###################
UPData = pickle.loads(data)
status = UPData[0][[0][0]
if status > 998: ##### it is here the error are given####
try:
e = len(L)
ori11 = UPData[0][1][0]
ori12 = UPData[0][1][1]
ori13 = UPData[0][1][2]
ori14 = UPData[0][1][3]
ori21 = UPData[0][1][4]
ori22 = UPData[0][1][5]
ori23 = UPData[0][1][6]
ori24 = UPData[0][1][7]
ori31 = UPData[0][2][0]
ori32 = UPData[0][2][1]
ori33 = UPData[0][2][2]
ori34 = UPData[0][2][3]
ori41 = UPData[0][2][4]
ori42 = UPData[0][2][5]
ori43 = UPData[0][2][6]
ori44 = UPData[0][2][7]
ori51 = UPData[0][3][0]
ori52 = UPData[0][3][1]
ori53 = UPData[0][3][2]
ori54 = UPData[0][3][3]
ori61 = UPData[0][3][4]
ori62 = UPData[0][3][5]
ori63 = UPData[0][3][6]
ori64 = UPData[0][3][7]
ori71 = UPData[0][4][0]
ori72 = UPData[0][4][1]
ori73 = UPData[0][4][2]
ori74 = UPData[0][4][3]
ori81 = UPData[0][4][4]
ori82 = UPData[0][4][5]
ori83 = UPData[0][4][6]
ori84 = UPData[0][4][7]
ori91 = UPData[0][5][0]
ori92 = UPData[0][5][1]
ori93 = UPData[0][5][2]
ori94 = UPData[0][5][3]
ori101 = UPData[0][5][4]
ori102 = UPData[0][5][5]
ori103 = UPData[0][5][6]
ori104 = UPData[0][5][7]
npcp11 = UPData[0][6][0]
npcp12 = UPData[0][6][1]
npcp13 = UPData[0][6][2]
npcp21 = UPData[0][6][3]
npcp22 = UPData[0][6][4]
npcp23 = UPData[0][6][5]
npcp31 = UPData[0][6][6]
npcp32 = UPData[0][6][7]
npcp33 = UPData[0][7][0]
npcp41 = UPData[0][7][1]
npcp42 = UPData[0][7][2]
npcp43 = UPData[0][7][3]
npcp51 = UPData[0][7][4]
npcp52 = UPData[0][7][5]
npcp53 = UPData[0][7][6]
npcp61 = UPData[0][7][7]
npcp62 = UPData[0][8][0]
npcp63 = UPData[0][8][1]
npcp71 = UPData[0][8][2]
npcp72 = UPData[0][8][3]
npcp73 = UPData[0][8][4]
npcp81 = UPData[0][8][5]
npcp82 = UPData[0][8][6]
npcp83 = UPData[0][8][7]
npcp91 = UPData[1][0][0]
npcp92 = UPData[1][0][1]
npcp93 = UPData[1][0][2]
npcp101 = UPData[1][0][3]
npcp102 = UPData[1][0][4]
npcp103 = UPData[1][0][5]
d0 = (status, )
d1 = (ori11,ori12,ori13,ori14,ori21,ori22,ori23,ori24)
d2 = (ori31,ori32,ori33,ori34,ori41,ori42,ori43,ori44)
d3 = (ori51,ori52,ori53,ori54,ori61,ori62,ori63,ori64)
d4 = (ori71,ori72,ori73,ori74,ori81,ori82,ori83,ori84)
d5 = (ori91,ori92,ori93,ori94,ori101,ori102,ori103,ori104)
d6 = (npcp11,npcp21,npcp31,npcp21,npcp22,npcp23,npcp31,npcp32)
d7 = (npcp33,npcp41,npcp42,npcp43,npcp51,npcp52,npcp53,npcp61)
d8 = (npcp62,npcp63,npcp71,npcp72,npcp72,npcp81,npcp82,npcp83)
d9 = (npcp91,npcp92,npcp93,npcp101,npcp102,npcp103)
pack1 = (d0,d1,d2,d3,d4,d5,d6,d7,d8)
pack2 = (d9, )
dat = pickle.dumps((pack1,pack2))
while tn < e:
server_socket.sendto(dat, (L[tn],3560))
tn = tn + 1
except:
pass
print("could not send data to some one or could not run the server at all")
else:
The part where the console tells me the error is located is here:
if status > 998:
The problem is here:
status = UPData[0][[0][0]
The second opening bracket [ is never closed. The Python parser keeps looking for the closing bracket, finds if on the next line, and gets confused because if is not allowed inside brackets.
You may want to remove this bracket or close it, according to your specific needs (the structure of UPData).
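For example, judging by how the surrounding lines index UPData (UPData[0][1][0] and so on), the intended line was most likely just:
status = UPData[0][0][0]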