I am trying to implement an FTP server using Twisted that limits the size of uploaded files. Ideally this check would happen before the transfer starts, but it is not really a problem if the transfer exits gracefully partway through once the file turns out to be too large.
I have started from the very basic ftpserver.py and slowly been pulling in more of the underlying classes from ftp.py to get down to the innards.
Current code below, please excuse the 'hack-and-slash' style employed until I can get it working.
#!/usr/bin/python
import os
from twisted.protocols.ftp import FTPFactory, FTPShell, FTPAnonymousShell, IFTPShell, IsADirectoryError, errnoToFailure
from twisted.cred.portal import Portal
from twisted.cred.checkers import AllowAnonymousAccess
from twisted.internet import reactor, defer
from twisted.python import filepath, failure
class FileConsumer1(object):
def __init__(self, fObj):
self.fObj = fObj
def registerProducer(self, producer, streaming):
self.producer = producer
assert streaming
def unregisterProducer(self):
self.producer = None
self.fObj.close()
def write(self, bytes):
size = os.fstat(self.fObj.fileno()).st_size + len(bytes)
if size > 10:
raise Exception("File too large") # WHAT GOES HERE?
self.fObj.write(bytes)
class FileWriter1(object):
def __init__(self, fObj):
self.fObj = fObj
self._receive = False
def receive(self):
assert not self._receive, "Can only call IWriteFile.receive *once* per instance"
self._receive = True
return defer.succeed(FileConsumer1(self.fObj))
def close(self):
return defer.succeed(None)
class FTPShell1(FTPShell):
def openForWriting(self, path):
p = self._path(path)
if p.isdir():
return defer.fail(IsADirectoryError(path))
try:
fObj = p.open('w')
except (IOError, OSError), e:
return errnoToFailure(e.errno, path)
except:
return defer.fail()
return defer.succeed(FileWriter1(fObj))
class FTPRealm1(object):
def __init__(self, root):
self.path = filepath.FilePath(root)
def requestAvatar(self, avatarId, mind, *interfaces):
avatar = FTPShell1(self.path)
return (IFTPShell, avatar, getattr(avatar, 'logout', lambda: None))
p = Portal(FTPRealm1('./'), [ AllowAnonymousAccess() ])
f = FTPFactory(p)
reactor.listenTCP(4021, f)
reactor.run()
Clearly the "if size > 10" check will use a much larger limit in practice, but how should I be indicating there's a problem at this point? As it stands, Twisted catches that exception, but it's not very elegant. As far as I can see from examining ftp.py, there's nothing obvious I can return here. Can I pass down a deferred in some way? How should I be closing down the transfer elegantly?
Thanks,
Here's a revised version
#!/usr/bin/python
import os
from zope.interface import Interface, implements
from twisted.protocols.ftp import FTPFactory, FTPShell, FTPAnonymousShell, IFTPShell, IWriteFile, BaseFTPRealm, FTPCmdError, EXCEEDED_STORAGE_ALLOC, IsADirectoryError, errnoToFailure
from twisted.cred.portal import Portal
from twisted.cred.checkers import AllowAnonymousAccess
from twisted.internet import reactor, defer, interfaces
from twisted.python import filepath
class ExceededStorageAllocError(FTPCmdError):
errorCode = EXCEEDED_STORAGE_ALLOC
class FileConsumer(object):
implements(interfaces.IConsumer)
def __init__(self):
self.data = ""
self.error = None
def registerProducer(self, producer, streaming):
self.producer = producer
assert streaming
def unregisterProducer(self):
if self.producer:
self.producer.stopProducing()
self.producer = None
def write(self, bytes):
self.data += bytes
if len(self.data) > 10:
self.unregisterProducer()
self.error = ExceededStorageAllocError()
class FileWriter(object):
implements(IWriteFile)
def __init__(self, path):
self.path = path
def receive(self):
self.consumer = FileConsumer()
return defer.succeed(self.consumer)
def close(self):
if self.consumer.error:
return defer.fail(self.consumer.error)
try:
f = self.path.open('w')
except (IOError, OSError), e:
            return errnoToFailure(e.errno, self.path)
f.write(self.consumer.data)
return defer.succeed(None)
class FTPShell1(FTPShell):
makeDirectory = FTPAnonymousShell.makeDirectory
removeDirectory = FTPAnonymousShell.removeDirectory
def openForWriting(self, path):
p = self._path(path)
if p.isdir():
return defer.fail(IsADirectoryError(path))
return defer.succeed(FileWriter(p))
class FTPRealm1(BaseFTPRealm):
def __init__(self, root):
self.root = root
def requestAvatar(self, avatarId, mind, *interfaces):
avatar = FTPShell1(filepath.FilePath(self.root))
return (IFTPShell, avatar, getattr(avatar, 'logout', lambda: None))
p = Portal(FTPRealm1('./'), [ AllowAnonymousAccess() ])
f = FTPFactory(p)
reactor.listenTCP(4021, f)
reactor.run()
This accumulates the received data within the FileConsumer, then aborts if the file is too long. The close() method of the FileWriter then either reports that error or writes the complete buffer out to the file.
The only real issue I'm having with this is that, when it runs, the exception is displayed on the server:
Unexpected error received during transfer:
Traceback (most recent call last):
Failure: __main__.ExceededStorageAllocError:
As a quick disclaimer, I'm very bad with Twisted's producer/consumer model, so this may not work. As always, I'm not responsible if things blow up ;)
You seem to be on the correct path, so pat yourself on the back for that. I think if you call unregisterProducer when a file is too large, the consumer should stop receiving data. You may also need to call self.producer.stopProducing(), but don't quote me on that.
def unregisterProducer(self):
self.producer.stopProducing()
self.fObj.close()
def write(self, bytes):
size = os.fstat(self.fObj.fileno()).st_size + len(bytes)
if size > 10:
        self.unregisterProducer()
        # log statements would go here
        # do some clean up too
        return  # stop here so the oversized chunk isn't written
    self.fObj.write(bytes)
If my mental Python interpreter is correct, this should simply stop consuming the file. As for what you should return to the client, you're going to have to read the FTP RFC to figure that out.
PS
As tedious as it may seem, please use the @implementer decorator. Most of the time you'll be fine without it, but there may be instances where unexpected tracebacks appear.
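For example, a minimal sketch (mine, not from the code above) of the same consumer class declared with the decorator instead of the implements() call inside the class body:
from zope.interface import implementer
from twisted.internet import interfaces

@implementer(interfaces.IConsumer)
class FileConsumer(object):
    # Same class as above; only the interface declaration has moved to a decorator.
    def __init__(self):
        self.data = ""
        self.error = None
        self.producer = None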
Related
I would like to use a try/except statement, but across two functions: I catch an exception in function, but function2 runs anyway. How can I stop function2 from running when there has been an exception?
I want to move this into a windowed application. If the file does not load, I want to display an information window; I only want the program to go on (to function2) when the file loads.
class Files:
def __init__(self):
self.name = "fle.txt"
def function(self):
try:
self.f = open(self.name, 'rb')
except OSError:
print("Problem!!!")
def function2(self):
print(self.f.read())
def main():
file=Files()
file.function()
file.function2()
Don't catch an exception unless you actually know how to handle it.
class Files:
def __init__(self):
self.name = "fle.txt"
self.f = None
def function(self):
self.f = open(self.name, 'rb')
def function2(self):
if self.f is None:
raise Exception("File not initialized!") #Example
#return #just if you don't throw or exit
print(self.f.read())
def main():
file=Files()
try:
file.function()
except OSError:
print("Problem!!!")
else:
file.function2()
main()
Wrap your function calls in a higher level try/except.
Of course, you would never write anything like this because it's so inflexible. This answer does not condone the OP's approach but suggests how that could be made to work.
class Files:
def __init__(self):
self.name = 'fle.txt'
def function_1(self):
self.fd = open(self.name)
def function_2(self):
print(self.fd.read())
def __del__(self):
try:
self.fd.close()
except Exception:
pass
file = Files()
try:
file.function_1()
file.function_2()
except Exception as e:
print(e)
So we don't do any exception handling within the class functions (except in __del__, where we ignore any issues) but allow all/any exceptions to propagate to the caller. Here we want to call two class functions, so we wrap them both in the same try/except block.
If function_1 raises an exception, function_2 won't be called.
__del__ was added to show how one could clean up, but it's not the way this should be handled.
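If the cleanup genuinely matters, a context manager is the more dependable tool; here is a minimal sketch of the same Files idea written that way (my code, assuming the same fle.txt file, not part of the answer above):
class Files:
    def __init__(self, name='fle.txt'):
        self.name = name
        self.fd = None

    def __enter__(self):
        # open() may raise OSError here; the caller's try/except will see it
        self.fd = open(self.name)
        return self

    def __exit__(self, exc_type, exc, tb):
        if self.fd is not None:
            self.fd.close()
        return False  # do not suppress exceptions

try:
    with Files() as file:
        print(file.fd.read())
except OSError as e:
    print(e)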
@tomgalpin is right: you could just exit right there after the problem occurs.
But since this is a class, maybe you want to print the error and pass back no data?
Here's one way to look at that, with Tom's sys.exit() included (commented out).
Also, if you keep your original code, be sure to close the file handle. Calling open() on a file without .close() can leave file handles open and cause problems if your class continues running afterwards.
class Files:
def __init__(self):
self.name = "fle.txt"
# Empty string in data if no data
self.data = ""
def function(self):
try:
#self.f = open(self.name, 'rb')
with open(self.name, 'rb') as f:
self.data = f.read()
except OSError as err:
print("Problem!!!", err)
# You could exit
# sys.exit()
# But you could also return an empty string,
# which is "falsy", regardless of what happens
finally:
return self.data
def function2(self):
print(f"Data 3 {self.data}")
def main():
file=Files()
# You could print this, or use it to get the data
print("Data 1", file.function())
data = file.function()
print(f"Data 2 {data}")
# this now also shows the data
file.function2()
Use a variable that is usually True but becomes False if function fails.
Example
class Files:
def __init__(self):
self.name = "file.txt"
self.Continue=True
self.data = ""
def function(self):
try:
#self.f = open(self.name, 'rb')
with open(self.name, 'rb') as f:
self.data = f.read()
except OSError as err:
print("Problem!!!", err)
self.Continue=False
return False
finally:
return self.data
def function2(self):
if self.Continue:
print(self.data)
        else:
            pass  # Code if function failed
def main():
file=Files()
file.function()
file.function2()
I am currently working through the Twisted Developer Guides and would like some information/guidance about extending the use case of one of the provided examples, namely caching instances of objects.
The example code is:
cache_classes.py
from twisted.spread import pb
class MasterDuckPond(pb.Cacheable):
def __init__(self, ducks):
self.observers = []
self.ducks = ducks
def count(self):
print "I have [%d] ducks" % len(self.ducks)
def addDuck(self, duck):
self.ducks.append(duck)
for o in self.observers: o.callRemote('addDuck', duck)
def removeDuck(self, duck):
self.ducks.remove(duck)
for o in self.observers: o.callRemote('removeDuck', duck)
def getStateToCacheAndObserveFor(self, perspective, observer):
self.observers.append(observer)
# you should ignore pb.Cacheable-specific state, like self.observers
return self.ducks # in this case, just a list of ducks
def stoppedObserving(self, perspective, observer):
self.observers.remove(observer)
class SlaveDuckPond(pb.RemoteCache):
# This is a cache of a remote MasterDuckPond
def count(self):
return len(self.cacheducks)
def getDucks(self):
return self.cacheducks
def setCopyableState(self, state):
print " cache - sitting, er, setting ducks"
self.cacheducks = state
def observe_addDuck(self, newDuck):
print " cache - addDuck"
self.cacheducks.append(newDuck)
def observe_removeDuck(self, deadDuck):
print " cache - removeDuck"
self.cacheducks.remove(deadDuck)
pb.setUnjellyableForClass(MasterDuckPond, SlaveDuckPond)
cache_sender.py
#!/usr/bin/env python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.spread import pb, jelly
from twisted.python import log
from twisted.internet import reactor
from cache_classes import MasterDuckPond
class Sender:
def __init__(self, pond):
self.pond = pond
def phase1(self, remote):
self.remote = remote
d = remote.callRemote("takePond", self.pond)
d.addCallback(self.phase2).addErrback(log.err)
def phase2(self, response):
self.pond.addDuck("ugly duckling")
self.pond.count()
reactor.callLater(1, self.phase3)
def phase3(self):
d = self.remote.callRemote("checkDucks")
d.addCallback(self.phase4).addErrback(log.err)
def phase4(self, dummy):
self.pond.removeDuck("one duck")
self.pond.count()
self.remote.callRemote("checkDucks")
d = self.remote.callRemote("ignorePond")
d.addCallback(self.phase5)
def phase5(self, dummy):
d = self.remote.callRemote("shutdown")
d.addCallback(self.phase6)
def phase6(self, dummy):
reactor.stop()
def main():
master = MasterDuckPond(["one duck", "two duck"])
master.count()
sender = Sender(master)
factory = pb.PBClientFactory()
reactor.connectTCP("localhost", 8800, factory)
deferred = factory.getRootObject()
deferred.addCallback(sender.phase1)
reactor.run()
if __name__ == '__main__':
main()
cache_receiver.py:
#!/usr/bin/env python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.application import service, internet
from twisted.internet import reactor
from twisted.spread import pb
import cache_classes
class Receiver(pb.Root):
def remote_takePond(self, pond):
self.pond = pond
print "got pond:", pond # a DuckPondCache
self.remote_checkDucks()
def remote_checkDucks(self):
print "[%d] ducks: " % self.pond.count(), self.pond.getDucks()
def remote_ignorePond(self):
# stop watching the pond
print "dropping pond"
# gc causes __del__ causes 'decache' msg causes stoppedObserving
self.pond = None
def remote_shutdown(self):
reactor.stop()
application = service.Application("copy_receiver")
internet.TCPServer(8800, pb.PBServerFactory(Receiver())).setServiceParent(
service.IServiceCollection(application))
This example seems pretty straightforward: the MasterDuckPond is controlled by the sending side, and the SlaveDuckPond is a cache that tracks changes to the master.
However, how would I go about updating/caching an entire list of instantiated objects?
Don't use PB. The protocol is overly complicated for at least 99% of use cases, meaning you will have to work a lot harder than necessary to understand, implement, and maintain your project. There are no other implementations of it and there probably never will be, which means you're stuck with Python and Twisted (which is not to say those things are bad, but there are a lot of other things out there that may also be good). The level of maintenance is minimal, so if you find bugs there's a small chance someone will help you get them fixed, but that's about all you can expect, and you may not even get that if the bugs aren't trivial.
Give HTTP a try. It can do a lot. If you combine it with a data format like Cap'n Proto, CBOR, or even JSON, it can do even more.
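As a rough, untested sketch of what that can look like (my code; DuckPondResource is a hypothetical name, not part of the original example), the pond state from the question could be served as JSON over HTTP with twisted.web:
import json

from twisted.internet import reactor
from twisted.web import resource, server

class DuckPondResource(resource.Resource):
    isLeaf = True

    def __init__(self, ducks):
        resource.Resource.__init__(self)
        self.ducks = ducks

    def render_GET(self, request):
        # Clients poll this endpoint instead of holding a pb.RemoteCache.
        request.setHeader(b"content-type", b"application/json")
        return json.dumps(self.ducks).encode("utf-8")

site = server.Site(DuckPondResource(["one duck", "two duck"]))
reactor.listenTCP(8800, site)
reactor.run()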
I am having an issue closing Excel after using Dispatch.
import openpyxl as xl
import os
from win32com import client
class CTAutomation:
def __init__(self, file):
self.invoice = xl.load_workbook(os.getcwd() + "\Templates\ctrates.xlsx")
self.xlTemplate = xl.load_workbook(os.getcwd() + "\Templates\invoiceTemplate.xlsx")
self.vpc = xl.load_workbook(os.getcwd() + "\Templates\Vpc.xlsx")
self.file = file
def invoice_make(self):
self.xlApp = client.Dispatch("Excel.Application")
self.xlbook = self.xlApp.Workbooks.Open(os.getcwd() + '\TestFiles\\' + self.file)
self.ws = self.xlbook.Worksheets[0]
self.ws.Visible = 1
self.ws.ExportAsFixedFormat(0, os.getcwd() + "\complitedpdf\\" + self.file + ".pdf")
self.quit()
def quit(self):
self.xlbook.Close()
self.xlApp.Quit()
def xlformater(self):
return None
def main():
pwd = os.listdir(os.getcwd() + "\TestFiles")
for file in pwd:
CTAutomation(file.strip(".xlsx")).invoice_make()
if __name__ == "__main__":
main()
All works well up to this point. I have found a few posts about this topic on the forum, but I feel that I'm still missing something needed to close the app, for example:
.xlsx and xls (Latest Versions) to pdf using python
Some advice would be much appreciated.
Essentially, it is your class object persisting in memory. Consider wrapping the process in a context manager using with() and calling invoice_make() within that context.
Additionally, you had an incorrect Excel call: the Worksheets collection is indexed with parentheses and a 1-based index, not square brackets starting at zero.
Finally, consider using os.path.join() to avoid back or forward slashes, and use a try/except block to catch COM exceptions and properly release objects from memory.
import openpyxl as xl
import os
from win32com import client
cwd = os.getcwd()
class CTAutomation:
def __init__(self):
self.invoice = xl.load_workbook(os.path.join(cwd, "Templates", "ctrates.xlsx"))
self.xlTemplate = xl.load_workbook(os.path.join(cwd, "Templates", "invoiceTemplate.xlsx"))
self.vpc = xl.load_workbook(os.path.join(cwd, "Templates", "Vpc.xlsx"))
def invoice_make(self, file):
try:
self.xlApp = client.Dispatch("Excel.Application")
self.xlbook = self.xlApp.Workbooks.Open(os.path.join(cwd, "TestFiles", file))
            self.ws = self.xlbook.Worksheets(1)  # USE PARENTHESES (NOT BRACKETS) AND A 1-BASED INDEX
#self.ws.Visible = 1 # KEEP PROCESS IN BACKGROUND
self.ws.ExportAsFixedFormat(0, os.path.join(cwd, "complitedpdf", file.replace(".xlsx",".pdf")))
self.xlbook.Close(False)
self.xlApp.Quit()
except Exception as e:
print(e)
finally:
self.ws = None # RELEASE EXCEL OBJS FROM MEMORY
self.xlbook = None
self.xlApp = None
def xlformater(self):
return None
def __enter__(self):
return self # BOUND TO as IN with()
def __exit__(self, *err):
return None
def main():
pwd = os.listdir(os.path.join(cwd, "TestFiles"))
with CTAutomation() as obj: # CONTEXT MANAGER
for file in pwd:
print(file)
obj.invoice_make(file)
if __name__ == "__main__":
main()
I'm seeing a strange phenomenon in Python with callback functions and handlers.
I use ZMQ to handle my communication and use a stream for the socket. I have the base class:
import multiprocessing
import zmq
from concurrent.futures import ThreadPoolExecutor
from zmq.eventloop import ioloop, zmqstream
from zmq.utils import jsonapi as json
# Types of messages
TYPE_A = 'type_a'
TYPE_B = 'type_b'
class ZmqProcess(multiprocessing.Process):
def __init__(self):
super(ZmqProcess, self).__init__()
self.context = None
self.loop = None
self.handle_stream = None
def setup(self):
self.context = zmq.Context()
self.loop = ioloop.IOLoop.instance()
def send(self, msg_type, msg, host, port):
sock = zmq.Context().socket(zmq.PAIR)
sock.connect('tcp://%s:%s' % (host, port))
sock.send_json([msg_type, msg])
def stream(self, sock_type, addr):
sock = self.context.socket(sock_type)
if isinstance(addr, str):
addr = addr.split(':')
host, port = addr if len(addr) == 2 else (addr[0], None)
if port:
sock.bind('tcp://%s:%s' % (host, port))
else:
port = sock.bind_to_random_port('tcp://%s' % host)
stream = zmqstream.ZMQStream(sock, self.loop)
return stream, int(port)
class MessageHandler(object):
def __init__(self, json_load=-1):
self._json_load = json_load
self.pool = ThreadPoolExecutor(max_workers=10)
def __call__(self, msg):
i = self._json_load
msg_type, data = json.loads(msg[i])
msg[i] = data
if msg_type.startswith('_'):
raise AttributeError('%s starts with an "_"' % msg_type)
getattr(self, msg_type)(*msg)
And I have a class that inherits from it:
import zmq
import zmq_base
class ZmqServerMeta(zmq_base.ZmqProcess):
def __init__(self, bind_addr, handlers):
super(ZmqServerMeta, self).__init__()
self.bind_addr = bind_addr
self.handlers = handlers
def setup(self):
super(ZmqServerMeta, self).setup()
self.handle_stream, _ = self.stream(zmq.PAIR, self.bind_addr)
self.handle_stream.on_recv(StreamHandler(self.handle_stream, self.stop,
self.handlers))
def run(self):
self.setup()
self.loop.start()
def stop(self):
self.loop.stop()
class StreamHandler(zmq_base.MessageHandler):
def __init__(self, handle_stream, stop, handlers):
super(StreamHandler, self).__init__()
self._handle_stream = handle_stream
self._stop = stop
self._handlers = handlers
def type_a(self, data):
if zmq_base.TYPE_A in self._handlers:
if self._handlers[zmq_base.TYPE_A]:
for handle in self._handlers[zmq_base.TYPE_A]:
self.pool.submit(handle, data)
else:
pass
else:
pass
def type_b(self, data):
if zmq_base.TYPE_B in self._handlers:
if self._handlers[zmq_base.TYPE_B]:
for handle in self._handlers[zmq_base.TYPE_B]:
self.pool.submit(handle, data)
else:
pass
else:
pass
def endit(self):
self._stop()
Additionally, I have a class that I want to use as storage. And here is where the trouble starts:
import threading
import zmq_server_meta as server
import zmq_base as base
class Storage:
def __init__(self):
self.list = []
self.list_lock = threading.RLock()
self.zmq_server = None
self.host = '127.0.0.1'
self.port = 5432
self.bind_addr = (self.host, self.port)
def setup(self):
        handlers = {base.TYPE_A: [self.remove]}
self.zmq_server = server.ZmqServerMeta(handlers=handlers, bind_addr=self.bind_addr)
self.zmq_server.start()
def add(self, data):
with self.list_lock:
try:
self.list.append(data)
except:
print "Didn't work"
def remove(self, msg):
with self.list_lock:
try:
self.list.remove(msg)
except:
print "Didn't work"
The idea is that this class stores some global information that it receives.
It is all started in a test file:
import sys
import time
import storage
import zmq_base as base
import zmq_server_meta as server
def printMsg(msg):
print msg
store = storage.Storage()
store.setup()
handlers = {base.TYPE_B: [printMsg]}
client = server.ZmqServerMeta(handlers=handlers, bind_addr=('127.0.0.1', 5431))
client.start()
message = "Test"
store.add(message)
client.send(base.TYPE_A, message, '127.0.0.1', 5432)
I simplified it to reduce clutter. Instead of just adding the element locally, it is usually sent and then a response comes back. The response the client sends should be processed by the correct callback, remove(), which should remove an element from the list. The problem that occurs is that the remove() function sees an empty list, although there should be an element in the list. If I check from the testing file, I can see the element after it was added, and if I call remove() from there, I see a non-empty list and can remove the element. My question is: why does the callback see an empty list, and how can I make sure it sees the correct elements?
Kind regards
Patrick
I believe the problem lies in the fact that the ZmqProcess class inherits from multiprocessing.Process. Multiprocessing does not allow objects to be shared among different processes, except through a shared memory map using Value or Array (as can be seen in the documentation: https://docs.python.org/3/library/multiprocessing.html#sharing-state-between-processes).
If you want to use your custom object, you can use a server process / proxy object, which can be found on the same page of the documentation.
So you can, for instance, define a manager in the __init__ method of the Storage class with self.manager = Manager(), and afterwards set self.list = self.manager.list(). This should do the trick.
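As a minimal sketch of that change (my code, showing only the parts of Storage that would change, untested against the full example):
from multiprocessing import Manager

class Storage:
    def __init__(self):
        self.manager = Manager()                # server process that owns the shared objects
        self.list = self.manager.list()         # proxy list, visible from every process
        self.list_lock = self.manager.RLock()   # a lock proxy also works across processes
        # ... the zmq_server setup stays exactly as in the question ...

    def add(self, data):
        with self.list_lock:
            self.list.append(data)

    def remove(self, msg):
        with self.list_lock:
            if msg in self.list:
                self.list.remove(msg)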
I need to pass a kwarg to the parent class of my equivalent of FingerFactoryFromService using super.
I know I am actually passing the kwarg to IFingerFactory, because that is also where I pass the service that ends up in FingerFactoryFromService's __init__. I can understand that it is getting tripped up somewhere in the component system, but I cannot think of any other way.
The error I keep getting is
exceptions.TypeError: 'test' is an invalid keyword argument for this function
Versions of code in my virtualenv are:
pip (1.4.1)
setuptools (1.1.6)
Twisted (13.1.0)
wsgiref (0.1.2)
zope.interface (4.0.5)
This is a cutdown example from the finger tutorial demonstrating the issue:
from twisted.protocols import basic
from twisted.application import internet, service
from twisted.internet import protocol, reactor, defer
from twisted.python import components
from zope.interface import Interface, implements # #UnresolvedImport
class IFingerService(Interface):
def getUser(user): # #NoSelf
"""
Return a deferred returning a string.
"""
def getUsers(): # #NoSelf
"""
Return a deferred returning a list of strings.
"""
class IFingerFactory(Interface):
def getUser(user): # #NoSelf
"""
Return a deferred returning a string.
"""
def buildProtocol(addr): # #NoSelf
"""
Return a protocol returning a string.
"""
def catchError(err):
return "Internal error in server"
class FingerProtocol(basic.LineReceiver):
def lineReceived(self, user):
d = self.factory.getUser(user)
d.addErrback(catchError)
def writeValue(value):
self.transport.write(value + '\r\n')
self.transport.loseConnection()
d.addCallback(writeValue)
class FingerService(service.Service):
implements(IFingerService)
def __init__(self, filename):
self.filename = filename
self.users = {}
def _read(self):
self.users.clear()
for line in file(self.filename):
user, status = line.split(':', 1)
user = user.strip()
status = status.strip()
self.users[user] = status
self.call = reactor.callLater(30, self._read) # #UndefinedVariable
def getUser(self, user):
print user
return defer.succeed(self.users.get(user, "No such user"))
def getUsers(self):
return defer.succeed(self.users.keys())
def startService(self):
self._read()
service.Service.startService(self)
def stopService(self):
service.Service.stopService(self)
self.call.cancel()
class FingerFactoryFromService(protocol.ServerFactory):
implements(IFingerFactory)
protocol = FingerProtocol
#def __init__(self, srv):
def __init__(self, srv, test=None):
self.service = srv
## I need to call super here because my equivalent of ServerFactory requires
## a kwarg but this cutdown example doesnt so I just assign it to a property
# super(FingerFactoryFromService, self).__init__(test=test)
self.test_thing = test or 'Default Something'
def getUser(self, user):
return self.service.getUser(user)
components.registerAdapter(FingerFactoryFromService,
IFingerService,
IFingerFactory)
application = service.Application('finger')
serviceCollection = service.IServiceCollection(application)
finger_service = FingerService('/etc/passwd')
finger_service.setServiceParent(serviceCollection)
#line_finger_factory = IFingerFactory(finger_service)
line_finger_factory = IFingerFactory(finger_service, test='Something')
line_finger_server = internet.TCPServer(1079, line_finger_factory)
line_finger_server.setServiceParent(serviceCollection)
This has nothing to do with the component system. What you want to do is override the Factory's buildProtocol method, as documented here:
https://twistedmatrix.com/documents/current/core/howto/servers.html#auto9
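For illustration, a rough sketch (mine, not taken from the howto) of what overriding buildProtocol can look like for the factory above, so extra state reaches each connection without passing anything through the adapter:
class FingerFactoryFromService(protocol.ServerFactory):
    implements(IFingerFactory)
    protocol = FingerProtocol

    def __init__(self, srv):
        self.service = srv
        self.test_thing = 'Something'  # ordinary attribute, no extra kwarg needed

    def getUser(self, user):
        return self.service.getUser(user)

    def buildProtocol(self, addr):
        # ServerFactory.buildProtocol creates the protocol and sets proto.factory = self,
        # so each connection can also be handed whatever extra state it needs here.
        proto = protocol.ServerFactory.buildProtocol(self, addr)
        proto.test_thing = self.test_thing
        return proto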