Is C more reliable for networking than Python?

What I'm trying in Python
I am trying to copy a large file over a TCP connection in Python (3.6). I have two functions: send_chunk (sends a little header, then some data) and recv_chunk (parses that header, returns the data). I split the file I'm sending into chunks and put them on the network as fast as it lets me. Until around 4-5 MB, everything works. Then recv_chunk receives some incorrect data, and everything is toast.
What works in C
The same operation in C (as demonstrated by netcat) has no problem sending a 100MB file with no errors (and much lower CPU usage). I looked at the netcat code, and I just see plain old select and read/write calls.
Question of the day:
What could be going wrong? Why does it seem so simple in C but it isn't working in Python?
code, for reference:
from time import time

def send_chunk(data, sock):
    if type(data) is str:
        data = bytes(data, 'utf8')
    len_str = "{}".format(len(data))
    len_str_size = chr(len(len_str))
    send_data = bytes(len_str_size + len_str, 'ascii') + data
    total_sent = 0
    total_len = len(send_data)
    while total_sent < total_len:
        data_sent = sock.send(send_data[total_sent:])
        print('%f sending %d' % (time(), total_len))
        if data_sent < total_len:
            print('only sent %d' % data_sent, flush=True)
        total_sent += data_sent

def recv_chunk(sock):
    payload_data = b''; size = 0
    len_data = b''; len_size = 0
    # get the length field size
    len_size = ord(sock.recv(1))
    # get the length field
    while len(len_data) < len_size:
        len_data += sock.recv(len_size - len(len_data))
    size = int(len_data)
    # get the data
    while len(payload_data) < size:
        payload_data += sock.recv(min(size - len(payload_data), 2048))
    return payload_data

Your code works for me, but copying your data many times makes it slow.
Simply use sendall:
def send_chunk(data, sock):
    if isinstance(data, str):
        data = bytes(data, 'utf8')
    sock.sendall(len(data).to_bytes(4, 'little'))
    sock.sendall(data)

def recv_chunk(sock):
    size = b""
    while len(size) < 4:
        size += sock.recv(4 - len(size))
    bytes_left = int.from_bytes(size, 'little')
    # get the data
    data = []
    while bytes_left:
        d = sock.recv(bytes_left)
        data.append(d)
        bytes_left -= len(d)
    return b''.join(data)
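For completeness, here is a minimal sketch of how these two helpers might be driven to transfer a file. send_file, recv_file, the host/port, and the zero-length end-of-file chunk are illustration only, not part of the original code:

import socket

CHUNK = 64 * 1024  # arbitrary read size for this sketch

def send_file(path, host="127.0.0.1", port=5000):
    with socket.create_connection((host, port)) as sock, open(path, "rb") as f:
        while True:
            block = f.read(CHUNK)
            if not block:
                break
            send_chunk(block, sock)
        send_chunk(b"", sock)  # zero-length chunk signals end of file

def recv_file(conn, path):
    # conn is a connected socket, e.g. returned by server_socket.accept()
    with open(path, "wb") as f:
        while True:
            block = recv_chunk(conn)
            if not block:  # zero-length chunk: transfer complete
                break
            f.write(block)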

Related

Error status after sending 32768 bytes from serial port in Python

I am doing UART communication using the pyserial library. I can send and receive data, but when the number of bytes sent reaches 32768 or more, I get this error:
ValueError: byte must be in range(0,256)
function definition:
def Write_to_serial_port(value):
    ser.write([value])
function usage:
#...
for i in data_buf[0:total_len]:  # data_buf is a list
    Write_to_serial_port(i)
#...
The error shown above occurs once the number of bytes sent reaches 32768.
An alternative serial port write function I tested:
def Write_to_serial_port(value):
    data = struct.pack('>B', value)
    ser.write(data)
Again, the same ValueError occurs when the number of bytes sent reaches 32768.
I also tried periodically flushing the input and output buffers, but it didn't help.
Any ideas on the solution?
EDIT1:
The purpose of the program is to send the bytes of a binary file. In each cycle I send 128 bytes (from the binary file) plus 13 bytes of CRC, file size, etc. information over the serial port. data_buf is 255 bytes in size, but I am only using 141 of them.
function usage (extended):
# ...
# Some definitions and assignments
# ...
while(bytes_remaining):
    if(bytes_remaining >= 128):
        len_to_read = 128
    else:
        len_to_read = bytes_remaining
    for x in range(len_to_read):
        file_read_value = bin_file.read(1)
        data_buf[9+x] = int(file_read_value[0])
    data_buf[0] = mem_write_cmd_total_len-1
    data_buf[1] = write_cmd
    data_buf[2] = word_to_byte(base_mem_address,1,1)
    data_buf[3] = word_to_byte(base_mem_address,2,1)
    data_buf[4] = word_to_byte(base_mem_address,3,1)
    data_buf[5] = word_to_byte(base_mem_address,4,1)
    data_buf[6] = gl_bin_file_sector_needed
    data_buf[7] = len_to_read
    data_buf[8] = send_count
    send_count = send_count + 1
    crc32 = get_crc(data_buf, mem_write_cmd_total_len - 4)
    data_buf[9 +len_to_read] = word_to_byte(crc32,1,1)
    data_buf[10+len_to_read] = word_to_byte(crc32,2,1)
    data_buf[11+len_to_read] = word_to_byte(crc32,3,1)
    data_buf[12+len_to_read] = word_to_byte(crc32,4,1)
    for i in data_buf[0:mem_write_cmd_total_len]:
        Write_to_serial_port(i)
#...
EDIT2: I also tried splitting the 40KB binary file into 128-byte chunk files and sending those. But I got the same error on the 256th file. I guess 256*128 = 32768 can't be a coincidence.
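A plausible culprit, given that the failure lands exactly on the 256th chunk: data_buf[8] = send_count is incremented once per 128-byte chunk and never wrapped, and ser.write([value]) only accepts values in range(0, 256). On the 256th chunk, send_count reaches 256 and can no longer fit in one byte. A minimal sketch of the failure and one possible fix (masking the counter to a byte; the port name and baud rate below are made up):

import serial  # pyserial

ser = serial.Serial("COM3", 115200)  # hypothetical port and baud rate

send_count = 256           # value reached on the 256th chunk (256 * 128 = 32768 bytes)
# ser.write([send_count])  # would raise ValueError: byte must be in range(0,256)
ser.write([send_count & 0xFF])  # wraps to 0 and fits in a single byte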

TransportError (429) 'Data too large' when loading JSON docs to ElasticSearch

I have a process running in Python 3.7 that loads JSON files, gathers file rows into chunks in async queues, and incrementally posts chunks to ElasticSearch for indexing.
Chunking is meant to avoid overloading the ElasticSearch connection.
def load_files_to_queue(file_queue, incomplete_files, doc_queue, index):
    logger.info("Initializing load files to queue")
    while True:
        try:
            current_file = file_queue.get(False)
            logger.info("Loading {} into queue.".format(current_file))
            iteration_counter = 0
            with open(current_file) as loaded_file:
                iterator = json_iterator(loaded_file)
                current_type = "doctype"
                chunk = []
                for row in iterator:
                    # Every so often check the queue size
                    iteration_counter += 1
                    if iteration_counter > 5000:
                        # If it gets too big, pause until it has gone
                        # down a bunch.
                        if doc_queue.qsize() > 30:
                            logger.info(
                                "Doc queue at {}, pausing until smaller.".format(
                                    doc_queue.qsize()
                                )
                            )
                            while doc_queue.qsize() > 10:
                                time.sleep(0.5)
                        iteration_counter = 0
                    for transformed in transform_single_doc(current_type, row, index):
                        if transformed:
                            chunk.append(transformed)
                    # NOTE: Send messages in chunks instead of single rows so that queue
                    # has less frequent locking
                    if len(chunk) >= DOC_QUEUE_CHUNK_SIZE:
                        doc_queue.put(chunk)
                        chunk = []
                if chunk:
                    doc_queue.put(chunk)
            incomplete_files.remove(current_file)
            logger.info("Finished loading {} into queue.".format(current_file))
            logger.info("There are {} files left to load.".format(file_queue.qsize()))
        except Empty:
            break
def bulk_load_from_queue(file_queue, incomplete_files, doc_queue, chunk_size=500):
    """
    Represents a single worker thread loading docs into ES
    """
    logger.info("Initialize bulk doc loader {}".format(threading.current_thread()))
    conn = Elasticsearch(settings.ELASTICSEARCH, timeout=180)
    dequeue_results(
        streaming_bulk(
            conn,
            load_docs_from_queue(file_queue, incomplete_files, doc_queue),
            max_retries=2,
            initial_backoff=10,
            chunk_size=chunk_size,
            yield_ok=False,
            raise_on_exception=True,
            raise_on_error=True,
        )
    )
    logger.info("Shutting down doc loader {}".format(threading.current_thread()))
Occasionally an error like this would happen in bulk_load_from_queue, which I interpret to mean the chunk was too large.
TransportError(429, 'circuit_breaking_exception', '[parent] Data too large, data for [<http_request>] would be [1024404322/976.9mb], which is larger than the limit of [1011774259/964.9mb], real usage: [1013836880/966.8mb], new bytes reserved: [10567442/10mb], usages [request=32880/32.1kb, fielddata=7440/7.2kb, in_flight_requests=164031664/156.4mb, accounting=46679308/44.5mb]')
Re-running usually resolved this, but the error has become too frequent. So I tried to enforce a chunk size limit in load_files_to_queue like so:
for transformed in transform_single_doc(current_type, row, index):
    if transformed:
        chunk_size = chunk_size + sys.getsizeof(transformed)
        chunk.append(transformed)
# NOTE: Send messages in chunks instead of single rows so that queue
# has less frequent locking
if (
    chunk_size >= DOC_QUEUE_CHUNK_SIZE
    or len(chunk) >= DOC_QUEUE_CHUNK_LEN
):
    doc_queue.put(chunk)
    chunk = []
    chunk_size = 0
if len(chunk) > 0:
    doc_queue.put(chunk)
This results in a handful of these errors towards the end of processing:
ConnectionResetError: [Errno 104] Connection reset by peer
and then:
EOFError in multiprocessing.connection._recv
Basically this means your request to Elasticsearch was too large for it to handle, so you could try reducing the chunk size.
Alternatively, look at using the _bulk API; there are helpers in the Python clients which should take most of the pain out of this.
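Since the loader already uses the streaming_bulk helper, one knob worth trying (a sketch, assuming that request size rather than document count is what trips the circuit breaker) is its max_chunk_bytes parameter, which caps the serialized size of each request in addition to chunk_size:

from elasticsearch import Elasticsearch
from elasticsearch.helpers import streaming_bulk

conn = Elasticsearch(settings.ELASTICSEARCH, timeout=180)  # settings as in the code above

for ok, item in streaming_bulk(
    conn,
    load_docs_from_queue(file_queue, incomplete_files, doc_queue),
    chunk_size=100,                    # fewer docs per request
    max_chunk_bytes=10 * 1024 * 1024,  # cap each request at ~10 MB on the wire
    max_retries=2,
    initial_backoff=10,
    yield_ok=False,
):
    if not ok:
        logger.warning("Failed to index: {}".format(item))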
We are encountering this again in our QA environment, @brenden. Do you suggest reducing the chunk size further? It is currently being passed as 200.
for worker in range(doc_worker_count):
    job = doc_pool.apply_async(
        bulk_load_from_queue,
        args=(file_queue, incomplete_files, doc_queue, 200),
        error_callback=error_callback,
    )
    jobs.append(job)

Python - Windows Raw Disk unable to read final sectors

When accessing a raw disk on Windows via Python's open(), it for whatever reason does not allow me to read the last 10240 bytes (i.e., the last 5 sectors at 2048 bytes/sector).
When dumping the disc image by other means and comparing the images I can see that the data cannot be assumed to be empty either. In fact, the first of the missing sectors has a UDF Anchor Tag present with related metadata in it. The following sectors are entirely blank.
This is how I dumped the disc contents:
out = open("test.iso", "wb")
with open(r"\\.\D:", "rb") as f:
    while True:
        data = f.read(512)
        if len(data) == 0:
            break
        out.write(data)
If I take that same open() object and tell it to seek to the very end of the disc, it does. So it can clearly reach the sectors at least in terms of seeking. If I then seek back 10240 bytes then attempt to f.read(...), it returns b'' (empty result) and not an error. It doesn't matter what size I tell it to read either. I tried all kinds of sizes, no-arg/default, 1, 12, 255, 512, 2048, 999999, etc.
Another StackOverflow answer on a different (but related) question also reported similar findings on Enhanced Audio Discs but seemingly no discussion was brought up since.
I have tested this on multiple DVD discs from varying kinds of studios and creators, all of which are in great condition with it still occurring.
Example reproducing code:
I don't know if it's going to happen for you; it may depend on your system config/disc/reader.
PyPI dependencies: wmi
WMI reports the disc size minus 10240 as well, so perhaps it's a Windows issue?
import os
from wmi import WMI

DISC_LETTER = "D:"
c = WMI()
disc_info = next(iter(c.Win32_CDROMDrive(Drive=DISC_LETTER)), None)
if not disc_info:
    raise ValueError("Disc %s not found..." % DISC_LETTER)
disc_size = int(disc_info.size)
disc_size += 10240  # WMI also reports the size without 10240, but it is real!
f = open(r"\\.\%s" % DISC_LETTER, "rb")
f.seek(disc_size)
if f.tell() == disc_size:
    print("Seeked to the end of the disc...")
f.seek(-10240, os.SEEK_CUR)
if f.tell() == disc_size - (2048 * 5):
    print("Seeked 5 sectors before the end of the disc...")
data = f.read(2048 * 5)
print("Data read (len: %d): %s" % (len(data), data))
Any ideas on why this might be would be great, as I have tried everything I could.
It seems this occurs because open(r'\\.\N:') opens the device with restricted boundaries.
My solution was to open the disc with IOCTL instead of open(). Specifically with CreateFile, DeviceIoControl, and FSCTL_ALLOW_EXTENDED_DASD_IO.
handle = win32file.CreateFile(
    r"\\.\D:",
    win32con.MAXIMUM_ALLOWED,
    win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE,
    None,
    win32con.OPEN_EXISTING,
    win32con.FILE_ATTRIBUTE_NORMAL,
    None
)
if handle == win32file.INVALID_HANDLE_VALUE:
    raise RuntimeError("Failed to obtain device handle...")
win32file.DeviceIoControl(handle, winioctlcon.FSCTL_ALLOW_EXTENDED_DASD_IO, None, None)
From here I can use ReadFile and SetFilePointer as replacements for read and seek respectively.
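For example, a minimal sketch (reusing the handle and the WMI disc_info from above, and assuming 2048-byte sectors) that reads the five formerly unreachable sectors:

import win32file

SECTOR = 2048  # bytes per sector on DVD
disc_size = int(disc_info.size) + 10240  # from the WMI query earlier

# position the file pointer five sectors before the true end, then read them
win32file.SetFilePointer(handle, disc_size - 5 * SECTOR, win32file.FILE_BEGIN)
rc, data = win32file.ReadFile(handle, 5 * SECTOR, None)
if rc != 0:
    raise IOError("ReadFile failed with code %d" % rc)
print("Read %d bytes from the tail of the disc" % len(data))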
I even worked on a new class that loads it all and allows you to dynamically read and seek without having to worry about sector alignment.
import math
import os
import struct
from typing import Optional

import win32con
import win32file
import winioctlcon


class Win32Device:
    """
    Class to read and seek a Windows Raw Device IO object without bother.
    It deals with getting the full size, allowing full access to all sectors,
    and alignment with the disc's sector size.
    Author: PHOENiX <pragma.exe#gmail.com>
    License: Free, enjoy! This should be a thing open() does by default.
    """

    def __init__(self, target):
        # type: (str) -> None
        self.target = target
        self.sector_size = None
        self.disc_size = None
        self.position = 0
        self.handle = self.get_handle()
        self.geometry = self.get_geometry()

    def __enter__(self):
        return self

    def __exit__(self, *_, **__):
        self.dispose()

    def __len__(self) -> int:
        return self.geometry[-2]

    def dispose(self):
        if self.handle != win32file.INVALID_HANDLE_VALUE:
            win32file.CloseHandle(self.handle)

    def get_target(self):
        # type: () -> str
        """Get UNC target name. Can be `E:` or `PhysicalDriveN`."""
        target = self.target
        if not target.startswith("\\\\.\\"):
            target = rf"\\.\{target}"
        return target

    def get_handle(self):
        # type: () -> int
        """Get a direct handle to the raw UNC target, and unlock its IO capabilities."""
        handle = win32file.CreateFile(
            # https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea
            self.get_target(),  # target
            win32con.MAXIMUM_ALLOWED,  # desired access
            win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE,  # share mode, write needed
            None,  # security attributes
            win32con.OPEN_EXISTING,  # creation disposition
            win32con.FILE_ATTRIBUTE_NORMAL,  # flags and attributes
            None  # template file
        )
        if handle == win32file.INVALID_HANDLE_VALUE:
            raise RuntimeError("Failed to obtain device handle...")
        # elevate accessible sectors, without this the last 5 sectors (in my case) will not be readable
        win32file.DeviceIoControl(handle, winioctlcon.FSCTL_ALLOW_EXTENDED_DASD_IO, None, None)
        return handle

    def get_geometry(self):
        # type: () -> tuple[int, ...]
        """
        Retrieves information about the physical disk's geometry.
        https://learn.microsoft.com/en-us/windows/win32/api/winioctl/ns-winioctl-disk_geometry_ex
        Returns a tuple of:
            Cylinders-Lo
            Cylinders-Hi
            Media Type
            Tracks Per Cylinder
            Sectors Per Track
            Bytes Per Sector
            Disk Size
            Extra Data
        """
        return struct.unpack("8L", win32file.DeviceIoControl(
            self.handle,  # handle
            winioctlcon.IOCTL_DISK_GET_DRIVE_GEOMETRY_EX,  # ioctl api
            b"",  # in buffer
            32  # out buffer
        ))

    def tell(self):
        # type: () -> int
        """Get current (spoofed) position."""
        return self.position

    def _tell(self):
        # type: () -> int
        """Get current real position."""
        if not self.handle:
            self.handle = self.get_handle()
        return win32file.SetFilePointer(self.handle, 0, win32file.FILE_CURRENT)

    def seek(self, offset, whence=os.SEEK_SET):
        # type: (int, int) -> int
        """Seek at any point in the stream, in an aligned way."""
        if whence == os.SEEK_CUR:
            whence = self.tell()
        elif whence == os.SEEK_END:
            whence = len(self)
        to = whence + offset
        closest = self.align(to)  # get as close as we can while being aligned
        if not self.handle:
            self.handle = self.get_handle()
        pos = win32file.SetFilePointer(self.handle, closest, win32file.FILE_BEGIN)
        if pos != closest:
            raise IOError("Seek was not precise...")
        self.position = to  # not actually at this location, read will deal with it
        return to

    def read(self, size=-1):
        # type: (int) -> Optional[bytes]
        """Read any amount of bytes in the stream, in an aligned way."""
        if not self.handle:
            self.handle = self.get_handle()
        sector_size = self.geometry[-3]
        offset = abs(self._tell() - self.tell())
        has_data = b''
        while self._tell() < self.tell() + size:
            res, data = win32file.ReadFile(self.handle, sector_size, None)
            if res != 0:
                raise IOError(f"An error occurred: {res} {data}")
            if len(data) < sector_size:
                raise IOError(f"Read {sector_size - len(data)} less bytes than requested...")
            has_data += data
        # seek to the position wanted + size read, which will then be re-aligned
        self.seek(self.tell() + size)
        return has_data[offset:offset + size]

    def align(self, size, to=None):
        # type: (int, Optional[int]) -> int
        """
        Align size to the closest but floor mod `to` value.
        Examples:
            align(513, to=512)
            >>> 512
            align(1023, to=512)
            >>> 512
            align(1026, to=512)
            >>> 1024
            align(12, to=10)
            >>> 10
        """
        if not to:
            to = self.geometry[-3]  # logical bytes per sector value
        return math.floor(size / to) * to
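A short usage sketch of the class above (the drive letter is hypothetical; sector size and disc size come from the geometry query):

import os

# dump the last five sectors, which plain open() could not reach
with Win32Device(r"\\.\D:") as dev:
    sector_size = dev.geometry[-3]  # bytes per sector
    dev.seek(-5 * sector_size, os.SEEK_END)
    tail = dev.read(5 * sector_size)
    print("Read %d bytes" % len(tail))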

socketcan J1939 filter use in python

In Python, I am trying to use the J1939 filtering as mentioned in the Linux kernel docs: https://www.kernel.org/doc/html/latest/networking/j1939.html
The following code fails at the setsockopt() line (setting up filters):
import socket
import struct

def pack_J1939_filters(can_filters):
    can_filter_fmt = "=" + "2Q2B2I" * len(can_filters)
    filter_data = []
    for can_filter in can_filters:
        name = can_filter['name']
        name_mask = can_filter['name_mask']
        addr = can_filter['addr']
        addr_mask = can_filter['addr_mask']
        pgn = can_filter['pgn']
        pgn_mask = can_filter['pgn_mask']
        filter_data.append(name)
        filter_data.append(name_mask)
        filter_data.append(addr)
        filter_data.append(addr_mask)
        filter_data.append(pgn)
        filter_data.append(pgn_mask)
    return struct.pack(can_filter_fmt, *filter_data)

s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_J1939)
interface = "vcan0"
src_name = socket.J1939_NO_NAME
src_pgn = socket.J1939_NO_PGN
src_addr = 0x81
src_sck_addr = (interface, src_name, src_pgn, src_addr)
s.bind(src_sck_addr)
filters = [{"name": 0, "name_mask": 0, "addr": 0, "addr_mask": 0, "pgn": 0, "pgn_mask": 0}]
packed_filters = pack_J1939_filters(filters)
# socket.SOL_CAN_J1939 does not seem to exist
SOL_CAN_BASE = 100
CAN_J1939 = 7
SOL_CAN_J1939 = SOL_CAN_BASE + CAN_J1939
s.setsockopt(SOL_CAN_J1939, socket.SO_J1939_FILTER, packed_filters)
s.recvfrom(128)
s.close()
First, the kernel documentation mentions using SOL_CAN_J1939 as the first argument. However, socket.SOL_CAN_J1939 does not exist in the socket module. Looking at the code at this location, I was able to work out that this int value should be 107: http://socket-can.996257.n3.nabble.com/RFC-v3-0-6-CAN-add-SAE-J1939-protocol-td7571.html
As for the setsockopt() third argument, I packed the filters to match the j1939_filter structure (26 bytes as described in the code from the previous link). This is similar to what is done in can.interfaces.socketcan.utils for raw CAN.
What am I doing wrong to cause setsockopt() to fail?
The first issue was that the struct.pack format (can_filter_fmt) was wrong. I had assumed that the size of the kernel's j1939_filter structure was the sum of its members. This is wrong, since the compiler adds padding; that padding can be expressed in the struct.pack format as x, giving 2Q2I2B6x. Please see Why isn't sizeof for a struct equal to the sum of sizeof of each member?
The second issue was the member order: the structure packs as 2Q2I2B6x, not 2Q2B2I (pgn and pgn_mask come before addr and addr_mask, not after).
As for SOL_CAN_J1939, I was correct; it needs to be defined manually because it does not yet exist in the socket module.
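A quick interpreter check makes the padding visible (not part of the fix itself, just a sanity check):

import struct

# 2 x u64 + 2 x u32 + 2 x u8 = 26 bytes of members...
print(struct.calcsize("=2Q2I2B"))    # 26
# ...but the kernel struct is padded out to 32 bytes, hence the 6 pad bytes
print(struct.calcsize("=2Q2I2B6x"))  # 32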
The final code is the following:
#!/usr/bin/env python3
import socket
import struct

def pack_J1939_filters(can_filters=None):
    if can_filters is None:
        # Pass all messages
        can_filters = [{}]
    can_filter_fmt = "=" + "2Q2I2B6x" * len(can_filters)
    filter_data = []
    for can_filter in can_filters:
        filter_data.append(can_filter.get('name', 0))
        filter_data.append(can_filter.get('name_mask', 0))
        filter_data.append(can_filter.get('pgn', 0))
        filter_data.append(can_filter.get('pgn_mask', 0))
        filter_data.append(can_filter.get('addr', 0))
        filter_data.append(can_filter.get('addr_mask', 0))
    return struct.pack(can_filter_fmt, *filter_data)

def print_msg(data, sck_addr):
    print(f"SA:{hex(sck_addr[3])} PGN:{hex(sck_addr[2])}")
    for j in range(len(data)):
        if j % 8 == 0 and j != 0:
            print()
        if j % 8 == 0:
            print(f"bytes {j} to {j+7}: ", end="")
        print(f"{hex(data[j])} ", end="")
    print()
    print()

def main():
    s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_J1939)
    # allows receiving broadcast messages
    s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    interface = "vcan0"
    src_name = socket.J1939_NO_NAME
    src_pgn = socket.J1939_NO_PGN  # always no PGN for source, unless filtering is needed
    src_addr = 0x81  # recvfrom() will not return destination-specific messages for other addresses
    src_sck_addr = (interface, src_name, src_pgn, src_addr)
    s.bind(src_sck_addr)
    packed_filters = pack_J1939_filters()
    SOL_CAN_BASE = 100
    CAN_J1939 = 7
    SOL_CAN_J1939 = SOL_CAN_BASE + CAN_J1939
    s.setsockopt(SOL_CAN_J1939, socket.SO_J1939_FILTER, packed_filters)
    (recv_data, recv_sck_addr) = s.recvfrom(128)
    print_msg(recv_data, recv_sck_addr)
    s.close()

if __name__ == "__main__":
    main()
Thank you.
For J1939 to work with SocketCAN you need two things:
kernel 5.4+
can-j1939 kernel module enabled
Testing for can-j1939:
If you install can-utils and all you get after sudo modprobe can-j1939 is a fatal error, or if you start testj1939 from can-utils and get an error that the protocol is not supported, then can-j1939 was not enabled in your kernel and you need to compile it manually.
Here are my instructions for enabling can-j1939 in Debian 10 kernel:
https://github.com/linux-can/can-utils/blob/master/can-j1939-install-kernel-module.md
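The same check can also be done from Python (a minimal probe, assuming Python 3.9+ where socket.CAN_J1939 exists):

import socket

try:
    s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_J1939)
    s.close()
    print("can-j1939 is available")
except OSError as e:
    # "Protocol not supported" here points at a missing can-j1939 kernel module
    print(f"can-j1939 not available: {e}")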

Raw socket python packet sniffer

I have created a simple raw-socket-based packet sniffer. But when I run it, it rarely captures a packet. At first I made it capture packets at 1-second intervals, but seeing that no packets were captured I commented that line out. I was connected to the internet and a lot of HTTP traffic was going back and forth, but I could not capture any of it. Is there a problem in how I created the socket? Please, someone give me a solution. I am fairly new to Python programming and could not figure out how to solve this.
import socket, binascii, struct
import time

sock = socket.socket(socket.PF_PACKET, socket.SOCK_RAW, socket.htons(0x800))
print "Waiting.."
pkt = sock.recv(2048)
print "received"

def processEth(data):
    # some code to process source mac and dest. mac
    return [smac, dmac]

def processIP(data):
    sip = str(binascii.hexlify(data[1]))
    dip = str(binascii.hexlify(data[2]))
    return [sip, dip]

def processTCP(data):
    sport = str(data[0])
    dport = str(data[1])
    return [sport, dport]

while len(pkt) > 0:
    if(len(pkt)) > 54:
        pkt = sock.recv(2048)
        ethHeader = pkt[0][0:14]
        ipHeader = pkt[0][14:34]
        tcpHeader = pkt[0][34:54]
        ethH = struct.unpack("!6s6s2s", ethHeader)
        ethdata = processEth(ethH)
        ipH = struct.unpack("!12s4s4s", ipHeader)
        ipdata = processIP(ipH)
        tcpH = struct.unpack("!HH16", tcpHeader)
        tcpdata = processTCP(tcpH)
        print "S.mac "+ethdata[0]+" D.mac "+ethdata[1]+" from: "+ipdata[0]+":"+tcpdata[0]+" to: "+ipdata[1]+":"+tcpdata[1]
        #time.sleep(1);
    else:
        continue
If that is all of your code, you are running into an endless loop: whenever a packet comes in whose length is not greater than 54 bytes, you end up processing the same packet over and over.
Additionally, socket.recv() returns a string/byte sequence, so your approach to accessing the data is wrong: pkt[0] returns a string of length 1, and pkt[0][x:y] will not return anything useful.
I am not familiar with using sockets, but with some changes I got output that might look similar to what you intended (there is something missing in processEth() I think...).
[...]
while len(pkt) > 0:
    print "Waiting.."
    pkt = sock.recv(2048)
    print "received"
    if(len(pkt)) > 54:
        ethHeader = pkt[0:14]
        ipHeader = pkt[14:34]
        tcpHeader = pkt[34:38]
        ethH = struct.unpack("!6s6s2s", ethHeader)
        ethdata = processEth(ethH)
        ipH = struct.unpack("!12s4s4s", ipHeader)
        ipdata = processIP(ipH)
        tcpH = struct.unpack("!HH", tcpHeader)
        tcpdata = processTCP(tcpH)
        print "S.mac "+ethdata[0]+" D.mac "+ethdata[1]+" from: "+ipdata[0]+":"+tcpdata[0]+" to: "+ipdata[1]+":"+tcpdata[1]
        #time.sleep(1);
    else:
        continue
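For the missing processEth(), here is a sketch of what it might look like given the "!6s6s2s" unpack above. Note that the Ethernet header puts the destination MAC first; this is an assumption about the elided code, not the asker's original:

def processEth(data):
    # data = (dest MAC, source MAC, EtherType) from struct.unpack("!6s6s2s", ...)
    dmac = binascii.hexlify(data[0])
    smac = binascii.hexlify(data[1])
    return [smac, dmac]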
