I have no idea what these changes do

Good Evening 2018-07-28 15:21:15 +03:00
parent 81b57337f6
commit f361d2990d
18 changed files with 265 additions and 178 deletions


@@ -1,33 +1,8 @@
 #!/usr/bin/python3
 # -*- coding: utf-8 -*-
-import json
 import yaml
-import logging
-
-from collections import namedtuple
-
-def namedtupledict(*a, **kw):
-    nt = namedtuple(*a, **kw)
-    def getitem(self, key):
-        try:
-            if type(key) == str:
-                return getattr(self, key)
-            return tuple.__getitem__(self, key)
-        except Exception:
-            raise Exception("Could not get %s" % key)
-    def ntiter(self):
-        for name in self._fields:
-            yield name, getattr(self, name)
-    nt.__iter__ = ntiter
-    nt.__getitem__ = getitem
-    return nt
 
 cnf = {}
-# dafuq
 with open('data/config.yaml') as config_file:
-    cnf = json.loads(json.dumps(yaml.load(config_file)), object_hook=lambda d: namedtupledict('X', d.keys())(*d.values()))
+    cnf = yaml.load(config_file)

README.md

@@ -1,5 +1,145 @@
 # medved
-alpha beta whatever
-to setup, update token and posting channels in docker/confd/templates/config.json.tmpl. you may also choose which plugins to use and tune their options.
+partly works

## roadmap
- refactor README and lib/plugin/plugins/*
- cleanup *
- probably Listener should be part of Core in order to supervise everything
- tasks don't store results currently; need to implement some validation of performed tasks (at least in the Executor thread before spreading new tasks), see http://python-rq.org/docs/results/
## configuration
`data/config.yaml`
```
# lists top-level services (like listeners, executors, data-manager) and pipelines enabled
core:
  services: # should point to correct service
    - data_manager
    - rq_executor
  pipelines:
    - ftp

# describes top-level services and their configurations
services:
  data_manager:
    # REQUIRED package name, just like path to file with dots
    package: lib.data.Manager
    # REQUIRED class inherited from Service (lib.Service)
    service: DataManager
    # REQUIRED used to select one of storages
    data:
      id: pool
    # there can be more service-specific configuration fields
    # for now they can be found in code :)
    sources:
      - random_ip
    feeds:
      - test_telegram
  rq_executor:
    package: lib.exeq.Executor
    service: RQExecutor
    data:
      id: pool
    redis:
      host: "127.0.0.1"

# describes datasources for data_manager
sources:
  random_ip:
    package: lib.plugin.base.lib.IP
    service: RandomIP
    data:
      id: random_ip

# describes datafeeds for data_manager
feeds:
  test_telegram: # doesn't work yet, eh
    package: lib.plugin.base.lib.Telegram
    service: TelegramFeed
    data:
      id: pool
    token:
    chats:
      - id: good_evening
        pipelines: [ftp, gopher]
        filter:
          clause: any-of
          equal:
            - ftp_list_files_status: success
            - gopher_collect_status: success

# describes various storages, e.g. data pool for pipelines or queues for datasources
storage:
  pool:
    # REQUIRED
    package: lib.plugin.base.lib.Mongo
    service: MongoStorage
    size: 40960
    # service-specific
    db: "medved"
    coll: 'pool'
  random_ip:
    package: lib.plugin.base.lib.Mongo
    service: MongoStorage
    size: 500
    db: "medved"
    coll: 'randomipsource'

# describes available pipelines
pipelines:
  ftp:
    # list of steps with dependencies
    steps:
      # will pass 10 items to lib.plugin.iscan.tasks.common.scan
      - name: scan
        package: lib.plugin.iscan.tasks.common
        service: scan
        multiple: 10 # default: False
        requires: []
      # will pass 1 item marked with ftp_scan to lib.plugin.iscan.tasks.ftp.connect
      - name: connect
        package: lib.plugin.iscan.tasks.ftp
        service: connect
        requires:
          - ftp_scan
      - name: list_files
        package: lib.plugin.iscan.tasks.ftp
        service: list_files
        requires:
          - ftp_connect

# various configurations for tasks
tasks:
  ftp_scan:
    ports:
      - 21
  ftp_connect:
    logins: data/ftp/logins.txt
    passwords: data/ftp/passwords.txt
    bruteforce: true
    timeout: 15
  ftp_list_files:

logging:
  Storage: INFO
```
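Since this commit the file is consumed as plain dicts and lists (see the first hunk above), so a minimal sketch of reading it looks like this (the code itself calls `yaml.load`; `safe_load` is only noted here as the safer alternative):

```
import yaml

with open('data/config.yaml') as config_file:
    cnf = yaml.load(config_file)  # yaml.safe_load would be the safer equivalent

enabled_services = cnf.get('core', {}).get('services', [])    # ['data_manager', 'rq_executor']
redis_host = cnf['services']['rq_executor']['redis']['host']  # "127.0.0.1"
```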
probably it can be launched with docker, however I didn't test it yet:
run `make base && docker-compose up --build --scale worker=5`
or simply `python medved.py`
you'll need working redis and mongodb for the default configuration
## top-level services
### lib.data.Manager.DataManager
Orchestrates datasources and datafeeds: starts and stops them and keeps an eye on the pool size. If the pool runs low, it pulls more data from the datasources.
### lib.exeq.Executor.RQExecutor
Runs the pipelines described in the configuration. Works via [RedisQueue](http://python-rq.org/), so it needs a Redis instance up and running.
Basically it takes data from the pool and submits it to workers.
RQ workers have to be launched separately (`rqworker worker` from the code root).
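To make that concrete, here is a minimal sketch of how one pipeline step ends up on the worker queue (condensed from `lib.exeq.Executor.RQExecutor` further down; the hard-coded item is illustrative only):

```
from redis import Redis
from rq import Queue

# the executor and `rqworker worker` share this queue
redis_conn = Redis(host='127.0.0.1')
q = Queue('worker', connection=redis_conn)

# one step of the 'ftp' pipeline, as defined in data/config.yaml above
step = {'name': 'scan', 'package': 'lib.plugin.iscan.tasks.common', 'service': 'scan', 'multiple': 10}

# items normally come from the Mongo-backed pool via the storage's get();
# hard-coded here so the sketch stands on its own
items = [{'data': {'ip': '198.51.100.23'}, 'tags': []}]

# RQ accepts a dotted-path string, so the worker process imports the task itself
q.enqueue("%s.%s" % (step['package'], step['service']), items)
```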


@@ -1,46 +0,0 @@
-import json, yaml
-import sys, os
-import importlib
-
-sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
-
-from Config import namedtupledict
-from lib import Logger, Loader
-
-logger = Logger('Linter')
-
-logger.info('Reading config')
-with open('data/config.yaml', 'r') as config:
-    data = yaml.load(config)
-    cnf = json.loads(json.dumps(data), object_hook=lambda d: namedtupledict('X', d.keys())(*d.values()))
-logger.info("Config: \n%s", json.dumps(data, indent=2))
-
-logger.info('CORE')
-services_check = ["datamgr", "listener", "executor"]
-services = []
-logger.info("Checking services: %s", ", ".join(services))
-for s, sv in cnf.core:
-    services.append(s)
-print(services)
-for s in services_check:
-    if not s in services:
-        logger.error("Service %s is not defined!", s)
-    else:
-        logger.info("Service %s is defined, continuing", s)
-        for k, v in cnf.core[s]:
-            if k == 'package':
-                try:
-                    importlib.import_module(v)
-                except Exception as e:
-                    logger.error("Unable to load package %s!\n%s", v, e)
-                else:
-                    logger.info("Package %s was imported successfully", v)
-            elif k == 'service':
-                try:
-                    Loader(cnf.core[s]['package']).get(v)
-                except Exception as e:
-                    logger.error("Unable to load package %s!\n%s", v, e)
-                else:
-                    logger.info("Service %s was imported successfully", v)


@@ -1,8 +1,11 @@
-Core:
+---
+dsl_version: 1
+
+core:
   services:
     - data_manager
 #    - zmq_listener
-#    - rq_executor
+    - rq_executor
   pipelines:
     - ftp
@@ -11,7 +14,6 @@ services:
   data_manager:
     package: lib.data.Manager
     service: DataManager
-    oneshot: 500
     data:
       id: pool
     sources:
@@ -40,8 +42,6 @@ sources:
     service: RandomIP
     data:
       id: random_ip
-    oneshot: 100
-    delay: 0.5
 
 feeds:
@@ -59,7 +59,6 @@ feeds:
           equal:
             - ftp_list_files_status: success
             - gopher_collect_status: success
-    delay: 10
 
 storage:
@@ -67,14 +66,12 @@ storage:
     package: lib.plugin.base.lib.Mongo
     service: MongoStorage
     size: 40960
-    url: "127.0.0.1"
     db: "medved"
     coll: 'pool'
   random_ip:
     package: lib.plugin.base.lib.Mongo
     service: MongoStorage
     size: 500
-    url: "127.0.0.1"
     db: "medved"
     coll: 'randomipsource'
@@ -85,29 +82,31 @@ pipelines:
       - name: scan
         package: lib.plugin.iscan.tasks.common
         service: scan
-        multiple: 200
+        multiple: 10
         requires: []
       - name: connect
         package: lib.plugin.iscan.tasks.ftp
         service: connect
         multiple: False
-        requires: [
-          ftp_scan
-        ]
+        requires:
+          - ftp_scan
       - name: list_files
         package: lib.plugin.iscan.tasks.ftp
         service: list_files
         multiple: False
-        requires: [
-          ftp_connect
-        ]
+        requires:
+          - ftp_connect
 
 tasks:
   ftp_scan:
-    ports: [21]
+    ports:
+      - 21
   ftp_connect:
     logins: data/ftp/logins.txt
     passwords: data/ftp/passwords.txt
     bruteforce: true
     timeout: 15
   ftp_list_files:
+
+logging:
+  Storage: INFO


@@ -9,7 +9,7 @@ class Service(Loadable):
     def __init__(self, thread, id, root=cnf):
         super().__init__(id, root)
-        self._data = Loader.by_id('storage', self.lcnf.data.id)
+        self._data = Loader.by_id('storage', self.lcnf.get("data").get("id"))
         self._stop_timeout = 10
         self._running = False


@@ -9,10 +9,10 @@ class DataManager(Service):
         self._logger.add_field('service', 'DataManager')
         self.sources = {}
-        for s in self.lcnf.sources:
+        for s in self.lcnf.get("sources"):
             self.attach_source(s)
         self.feeds = {}
-        for f in self.lcnf.feeds:
+        for f in self.lcnf.get("feeds"):
             self.attach_feed(f)
 
     def _pre_start(self):
@@ -46,12 +46,13 @@ class DataManager(Service):
         return self.feeds.get(name)
 
     def __run(self):
+        oneshot = self.lcnf.get("oneshot", 500)
         while self._running:
-            if self._data.count() < self.lcnf.oneshot:
-                while self._running and (self._data.count() + self.lcnf.oneshot < self._data.size()):
+            if self._data.count() < oneshot:
+                while self._running and (self._data.count() + oneshot < self._data.size()):
                     self._logger.debug("fill %s OF %s", self._data.count(), self._data.size())
                     for _,source in self.sources.items():
-                        items = source.next(count=self.lcnf.oneshot)
+                        items = source.next(count=oneshot)
                         if items:
                             self._data.put(items)
                     sleep(1)


@@ -7,7 +7,7 @@ class Storage(Loadable):
     def __init__(self, id, root):
         super().__init__(id, root)
-        self._size = self.lcnf.size
+        self._size = self.lcnf.get("size")
         self._logger = Logger("Storage")
         self._logger.add_field('vname', self.__class__.__name__)


@@ -2,6 +2,8 @@ from lib import Service, Loader, Loadable
 from lib.tasks.worker import worker
 
+from time import sleep
+
 from rq import Queue
 from redis import Redis
@@ -22,24 +24,31 @@ class RQExecutor(Executor):
     def __run(self):
         while self._running:
             try:
-                #TODO
-                for pn, pipeline in self.cnf.pipelines:
-                    self._logger.debug("pipeline: %s", pn)
-                    for step in pipeline.steps:
-                        self._logger.debug("step: %s", step.name)
-                        redis_conn = Redis(host=self.lcnf.redis.host)
+                redis_conn = Redis(host=self.lcnf.get('redis').get('host'))
                 q = Queue('worker', connection=redis_conn)
+                if q.count + 1 > self.lcnf.get('size', 100):
+                    sleep(self.lcnf.get('delay', 2))
+                    continue
+                for pn, pipeline in self.cnf.get("pipelines").items():
+                    self._logger.debug("pipeline: %s", pn)
+                    for step in pipeline['steps']:
+                        self._logger.debug("step: %s", step['name'])
                         filter = {
                             "not_exist": [
-                                pn + '_' + step.name
-                            ]
+                                pn + '_' + step['name']
+                            ],
+                            "exist": [
+                                [tag for tag in step.get("requires")]
+                            ]
                         }
                         items = []
-                        if step.multiple != False:
-                            items = self._data.get(count=step.multiple, filter=filter)
+                        multiple = step.get('multiple', False)
+                        if multiple != False:
+                            items = self._data.get(block=False, count=multiple, filter=filter)
                         else:
-                            items = self._data.get(filter=filter)
-                        for i in items:
-                            q.enqueue(Loader(step.package).get(step.service), i)
+                            items = self._data.get(block=False, filter=filter)
+                        if items:
+                            self._logger.debug("enqueueing %s.%s with %s", step['package'], step['service'], items)
+                            q.enqueue("%s.%s" % (step['package'], step['service']), items)
             except Exception as e:
                 self._logger.error(e)


@@ -23,7 +23,7 @@ class ZMQListener(Listener):
         self._running = True
         self._z_ctx = zmq.Context()
         self._z_sck = self._z_ctx.socket(zmq.REP)
-        self._z_sck.bind("tcp://%s:%s" % (self.lcnf.listen, self.lcnf.port))
+        self._z_sck.bind("tcp://%s:%s" % (self.lcnf.get('listen', '127.0.0.1'), self.lcnf.get('port', 12321)))
 
     def _post_stop(self):
         self._z_ctx.destroy()


@@ -31,7 +31,7 @@ class IPRange(IPSource):
     def load_ip_range(self):
         ip_range = []
-        with open(self.lcnf.path, "r") as text:
+        with open(self.lcnf.get('path'), "r") as text:
             for line in text:
                 try:
                     diap = line.split('-')
@@ -42,13 +42,14 @@ class IPRange(IPSource):
                 except Exception as e:
                     raise Exception("Error while adding range {}: {}".format(line, e))
         self._iprange = ip_range
 
     def __run(self):
         npos = 0
         apos = 0
         while self._running:
             try:
-                for _ in itertools.repeat(None, self.lcnf.oneshot):
-                    if self.lcnf.ordered:
+                for _ in itertools.repeat(None, self.lcnf.get('oneshot', 100)):
+                    if self.lcnf.get('ordered', True):
                         # put currently selected element
                         self._data.put(str(self._iprange[npos][apos]))
                         # rotate next element through networks and addresses
@@ -59,13 +60,13 @@ class IPRange(IPSource):
                             if npos + 1 < len(self._iprange):
                                 npos += 1
                             else:
-                                if self.lcnf.repeat:
+                                if self.lcnf.get('repeat', True):
                                     npos = 0
                                 else:
                                     self.stop()
                     else:
                         self._data.put(str(random.choice(random.choice(self._iprange))))
-                sleep(self.lcnf.delay)
+                sleep(self.lcnf.get('delay', 0.5))
             except Exception as e:
                 self._logger.warn(e)
@@ -78,10 +79,10 @@ class RandomIP(IPSource):
         while self._running:
             try:
                 items = []
-                for _ in itertools.repeat(None, self.lcnf.oneshot):
+                for _ in itertools.repeat(None, self.lcnf.get("oneshot", 100)):
                     randomip = socket.inet_ntoa(struct.pack('>I', random.randint(1, 0xffffffff)))
                     items.append(self.item(str(randomip)))
                 self._data.put(items)
-                sleep(self.lcnf.delay)
+                sleep(self.lcnf.get("delay", 0.5))
             except Exception as e:
                 self._logger.warn(e)


@@ -6,19 +6,35 @@ class MongoStorage(Storage):
     """Mongo storage. Currently the only working correctly."""
     def __init__(self, id, root):
         super().__init__(id, root)
-        self._client = MongoClient(self.lcnf.url)
-        self._db = self._client.get_database(self.lcnf.db)
-        self._coll = self._db.get_collection(self.lcnf.coll)
-        self._logger.add_field("db", self.lcnf.db, 7)
-        self._logger.add_field("coll", self.lcnf.coll, 7)
+        self._client = MongoClient(self.lcnf.get("url", "127.0.0.1"))
+        self._db = self._client.get_database(self.lcnf.get("db"))
+        self._coll = self._db.get_collection(self.lcnf.get("coll"))
+        self._logger.add_field("db", self.lcnf.get("db"), 7)
+        self._logger.add_field("coll", self.lcnf.get("coll"), 7)
         self._logger.debug("Connecting")
 
     def count(self):
         return self._coll.count()
 
     def _get(self, block, filter):
+        # TODO cleanup dat BS
         if filter is None:
             filter = {}
+        ne_tags = {}
+        e_tags = {}
+        if filter.get('not_exist'):
+            tags = []
+            for ne in filter.get('not_exist'):
+                tags.append(ne)
+            ne_tags = {'tags': {'$not': {'$all': tags}}}
+            del filter['not_exist']
+        if filter.get('exist'):
+            tags = []
+            for e in filter.get('exist'):
+                tags.append(e)
+            e_tags = {'tags': {'$all': tags}}
+            del filter['exist']
+        filter = {'$and': [ne_tags, e_tags]}
         item = self._coll.find_one_and_delete(filter=filter)
         if block:
             while not item:
@@ -29,14 +45,14 @@ class MongoStorage(Storage):
 
     def _put(self, item, block):
         if block:
-            while self.count() + 1 >= self.size():
+            while self.count() + 1 > self.size():
                 self._logger.debug('Collection full: %s of %s', self.count(), self.size())
                 sleep(1)
         self._coll.insert_one(item)
 
     def _put_many(self, items, block):
         if block:
-            while self.count() + len(items) >= self.size():
+            while self.count() + len(items) > self.size():
                 self._logger.debug('Collection full: %s of %s', self.count(), self.size())
                 sleep(1)
         self._coll.insert_many(items)
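To make the new tag filter concrete, here is a worked example of what `_get` builds (the input dict is hypothetical, chosen to show the shape of the resulting query; the tag names come from the ftp pipeline):

```
# Hypothetical input: items already tagged by ftp_connect but not yet by ftp_list_files
filter = {'not_exist': ['ftp_list_files'], 'exist': ['ftp_connect']}

# After the rewrite in _get() above, find_one_and_delete() receives:
query = {'$and': [
    {'tags': {'$not': {'$all': ['ftp_list_files']}}},  # ne_tags: none of these tags may be present
    {'tags': {'$all': ['ftp_connect']}},                # e_tags: all of these tags must be present
]}
```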


@@ -10,14 +10,14 @@ class RemoteFeed(Feed):
     def __run(self):
         while self._running:
             try:
-                remote = Remote(self.lcnf.ip, self.lcnf.port)
+                remote = Remote(self.lcnf.get('ip'), self.lcnf.get('port'))
                 remote.connect()
                 items = self.get(5)
                 if items:
                     self._logger.info("Sending %s items" % len(items))
                     remote.run('data.put', {'items': items})
-                sleep(self.lcnf.delay)
+                sleep(self.lcnf.get('delay', 10))
             except Exception as e:
                 self._logger.warn(e)
@@ -27,11 +27,11 @@ class RemoteSource(Source):
         super().__init__(self.__run, id, root)
 
     def __run(self):
-        remote = Remote(self.lcnf.ip, self.lcnf.port)
+        remote = Remote(self.lcnf.get('ip'), self.lcnf.get('port'))
         remote.connect()
         while self._running:
-            self._logger.debug("Requesting from %s", self.lcnf.ip)
-            rep = remote.run('data.get', {'count': self.lcnf.oneshot})
+            self._logger.debug("Requesting from %s", self.lcnf.get('ip'))
+            rep = remote.run('data.get', {'count': self.lcnf.get('oneshot', 100)})
             if rep.get('status'):
                 targets = rep.get("data")
             else:
@@ -39,4 +39,4 @@ class RemoteSource(Source):
             self._logger.debug("Got %s items", len(targets))
             for t in targets:
                 self._data.put(t)
-            sleep(self.lcnf.delay)
+            sleep(self.lcnf.get('delay', 10))


@@ -12,11 +12,12 @@ class TelegramFeed(Feed):
         super().__init__(self.__run, id, root)
 
     def __run(self):
+        delay = self.lcnf.get("delay", 2)
         while self._running:
             try:
-                for chat in self.lcnf.chats:
-                    chat_id = chat.id
-                    sleep(self.lcnf.delay)
+                for chat in self.lcnf.get("chats"):
+                    chat_id = chat['id']
+                    sleep(delay)
                     continue
                     # plugins -> pipelines
                     # it is in progress
@@ -24,7 +25,7 @@ class TelegramFeed(Feed):
                         msg = Manager.get_plugin(plugin).Plugin.TelegramMessage(host)
                         msg.run()
                         if msg.data['txt']:
-                            tbot = telebot.TeleBot(self.lcnf.token, threaded=False)
+                            tbot = telebot.TeleBot(self.lcnf.get('token'), threaded=False)
                             if msg.data['img']:
                                 self._logger.debug("Send IP with img %s:%s to %s" % (host['ip'], host['port'], chat_id))
                                 tbot.send_photo("@" + chat_id, msg.data['img'], caption=msg.data['txt'])
@@ -33,6 +34,6 @@ class TelegramFeed(Feed):
                                 tbot.send_message("@" + chat_id, msg.data['txt'])
                         else:
                             self._logger.error('Empty text!')
-                sleep(self.lcnf.delay)
+                sleep(delay)
             except Exception as e:
                 self._logger.warn(e)


@@ -30,11 +30,13 @@ class MasScan:
         result = parser.loads(out)
         return result
 
-def scan(items, taskid):
-    gi = GeoIP.open(cnf.geoip_dat, GeoIP.GEOIP_INDEX_CACHE | GeoIP.GEOIP_CHECK_CACHE)
+def scan(items):
+    gi = GeoIP.open(cnf.get("geoip_dat", "/usr/share/GeoIP/GeoIP.dat"), GeoIP.GEOIP_INDEX_CACHE | GeoIP.GEOIP_CHECK_CACHE)
     logger.debug("Starting scan")
     ms = MasScan()
-    hosts = ms.scan(ip_list=[i['data']['ip'] for i in items], port_list=cnf.Tasks[taskid].ports)
+    hosts = ms.scan(ip_list=[i['data']['ip'] for i in items],
+                    port_list=cnf.get("tasks").get('ftp_scan').get("ports"))
+    logger.debug(hosts)
     for h in hosts:
         for port in h['ports']:
             host = {


@@ -1,4 +1 @@
 plugins formed in old plugin format
-however, currently there is no other format


@@ -1,5 +1,6 @@
 from lib.plugin import Manager
 
 # legacy
+# why legacy?
 def worker(host, plugin):
     p = Manager.get_plugin(plugin)
     p.Plugin.Pipeline(host, plugin)


@@ -15,7 +15,7 @@ class Logger(logging.Logger):
         self._update()
 
     def get_level(self, name):
-        return logging.DEBUG
+        return logging.getLevelName(config.get("logging").get(name, "DEBUG"))
 
     def add_field(self, name, default, size=3):
         if not name in self._lf_extra:
@@ -84,27 +84,17 @@ class Loader:
     def __init__(self, path):
         self._path = path
         self._name = path.split('.')[-1]
-        self._dir = ".".join(path.split('.')[:-1]) # shiiiet # TODO remove
+        # self._dir = ".".join(path.split('.')[:-1])
         self._logger = Logger('Loader')
         self._logger.add_field('path', self._path)
 
     def get(self, name):
-        sys.path.append( os.path.join( os.path.dirname( __file__ ), '..' ))
+        sys.path.append(os.path.join( os.path.dirname( __file__ ), '..' ))
         self._logger.debug('load %s', name)
         result = importlib.import_module(self._path)
         return getattr(result, name)
 
     @classmethod
     def by_id(cls, section, id):
-        l = cls(config[section][id].package)
-        return l.get(config[section][id].service)(id=id, root=config[section])
+        l = cls(config[section][id].get('package'))
+        return l.get(config[section][id].get('service'))(id=id, root=config[section])
-
-"""
-section:
-    package: lib.package
-    service: Service
-    id:
-        qwer: asdf
-        tyui: ghjk
-"""


@@ -9,13 +9,14 @@ from lib import Logger, Loader
 class Core:
     """Core class, contains core services (like listeners, executors, datapool)"""
     def __init__(self):
-        self.cnf = cnf.Core
+        self.cnf = cnf.get("core")
         self.logger = Logger("Core")
         self.logger.debug("Loading services")
         self._services = []
-        for service in self.cnf.services:
-            service = Loader.by_id('services', service)
+        for service_name in self.cnf.get("services"):
+            service = Loader.by_id('services', service_name)
             self._services.append(service)
 
     def start(self):