
improve logging

Massaki Archambault 2024-10-09 19:11:11 -04:00
parent bfd9a94cac
commit 6a13728220
3 changed files with 29 additions and 14 deletions


@@ -1,12 +1,19 @@
 import argparse
 import os
 import time
+import logging
 
 import yaml
 
 from prometheus_client import start_http_server, Gauge, Counter
 
-from ecommerce_exporter.scrape_target import ScrapeError, ScrapeTarget
+from ecommerce_exporter.scrape_target import ScrapeTarget
+
+logging.basicConfig(
+    format=os.environ.get('LOG_FORMAT', '[%(asctime)s] [%(levelname)-8s] %(message)s'),
+    level=os.environ.get('LOG_LEVEL', 'INFO')
+)
+logger = logging.getLogger(__name__)
 
 ECOMMERCE_SCRAPE_TARGET_VALUE = Gauge(
     'ecommerce_scrape_target_value',
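
Note on the new logging setup: basicConfig accepts a level name string ('DEBUG', 'INFO', ...), so the LOG_LEVEL value from the environment works without converting it to a logging constant first. A minimal standalone sketch of the same pattern (the message text is illustrative):

    import logging
    import os

    # the level argument accepts a name string, so an env var can drive it directly
    logging.basicConfig(
        format=os.environ.get('LOG_FORMAT', '[%(asctime)s] [%(levelname)-8s] %(message)s'),
        level=os.environ.get('LOG_LEVEL', 'INFO'),
    )
    logging.getLogger(__name__).debug('only visible when LOG_LEVEL=DEBUG')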
@@ -63,19 +70,19 @@ def main():
     # set up the headers for each scrape target
     for scrape_target in scrape_targets:
         scrape_target.headers = {
-            'Accept': '*/*',
-            'User-Agent': args.user_agent,
+            'accept': '*/*',
+            'user-agent': args.user_agent,
         }
 
     # start the http server to serve the prometheus metrics
-    print("serving metrics on http://%s:%s/metrics" % (args.listen_address, args.listen_port))
+    logger.info("serving metrics on http://%s:%s/metrics", args.listen_address, args.listen_port)
     start_http_server(args.listen_port, args.listen_address)
 
     # start the main loop
     while True:
         for scrape_target in scrape_targets:
             try:
-                print("Starting scrape. product: '%s', target '%s'" % (scrape_target.product_name, scrape_target.target_name))
+                logger.info("Starting scrape. product: '%s', target '%s'", scrape_target.product_name, scrape_target.target_name)
                 value = scrape_target.query_target()
                 ECOMMERCE_SCRAPE_TARGET_VALUE.labels(
                     product_name=scrape_target.product_name,
@@ -88,7 +95,7 @@ def main():
             except KeyboardInterrupt:
                 return
             except Exception as e:
-                print("Failed to scrape! product: '%s', target: '%s', message: '%s'" % (scrape_target.product_name, scrape_target.target_name, e))
+                logger.error("Failed to scrape! product: '%s', target: '%s', message: '%s'", scrape_target.product_name, scrape_target.target_name, e)
                 ECOMMERCE_SCRAPE_TARGET_FAILURE.labels(
                     product_name=scrape_target.product_name,
                     target_name=scrape_target.target_name,
@@ -98,7 +105,7 @@
 def parse_config(config_filename):
     result = []
-    print('Loading configurations from %s' % config_filename)
+    logger.info('Loading configurations from %s', config_filename)
     with open(config_filename, 'r') as f:
         config = yaml.safe_load(f)
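
A side effect of replacing print with logger calls: print interpolates its %-arguments eagerly, while the logger defers formatting until the record actually passes the level filter. A small sketch of the difference (names are illustrative):

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    product_name, target_name = 'example-product', 'example-target'

    # eager: the string is built even if nobody reads it
    print("Starting scrape. product: '%s', target '%s'" % (product_name, target_name))

    # lazy: interpolation only happens if the record is emitted;
    # at INFO level this debug call costs almost nothing
    logger.debug("Starting scrape. product: '%s', target '%s'", product_name, target_name)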

ecommerce_exporter/scrape_target.py

@@ -6,7 +6,9 @@ from urllib.parse import urlparse
 import httpx
 import parsel
 import pyjq
+import logging
+logger = logging.getLogger(__name__)
 
 class ScrapeTarget:
     def __init__(self, product_name, url, selector, target_name=None, regex=None, parser=None):
         self.product_name = product_name
@@ -16,6 +18,10 @@ class ScrapeTarget:
         self.regex = re.compile(regex if regex else r'[0-9]+(\.[0-9]{2})?')
         self.parser = parser if parser else 'html'
         self.headers = {}
+        self.client = httpx.Client(
+            follow_redirects=True,
+            http2=True,
+        )
 
         # sanity check
         valid_parsers = ('html', 'json')
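
Besides HTTP/2, moving from a bare httpx.get to a per-instance httpx.Client means each target keeps one connection pool alive across scrape iterations instead of reconnecting every time. A sketch of the behavior (the URL is illustrative; http2=True needs the h2 package, hence the setup.cfg change below):

    import httpx

    # one client per target: connections (and the HTTP/2 session) survive between scrapes
    client = httpx.Client(follow_redirects=True, http2=True)
    first = client.get('https://example.com/product')
    second = client.get('https://example.com/product')  # can reuse the pooled connection
    print(first.http_version, second.status_code)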
@@ -25,21 +31,23 @@ class ScrapeTarget:
     def query_target(self):
         # some sites get suspicious if we talk to them in HTTP/1.1 (maybe because it doesn't match our user-agent?)
         # we use httpx to have HTTP/2 support and circumvent that issue
-        query_response = httpx.get(
-            url=self.url,
+        query_response = self.client.get(
+            self.url,
             headers=self.headers,
-            follow_redirects=True,
-        ).text
+        )
+        logger.info('Status: %s', query_response.status_code)
+        query_response_text = query_response.text
+        logger.debug('Response: %s', query_response_text)
 
         # parse the response and match the selector
         selector_match = ''
         if self.parser == 'html':
             # parse response as html
-            selector = parsel.Selector(text=query_response)
+            selector = parsel.Selector(text=query_response_text)
             selector_match = selector.css(self.selector).get()
         elif self.parser == 'json':
             # parse response as json
-            query_response_json = json.loads(query_response)
+            query_response_json = json.loads(query_response_text)
             selector_match = str(pyjq.first(self.selector, query_response_json))
         else:
             raise ScrapeError('Invalid parser!')
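
For reference, the two parser branches boil down to parsel CSS selectors and pyjq jq expressions over the response text; a minimal sketch with made-up inputs:

    import json

    import parsel
    import pyjq

    html = '<span class="price">19.99</span>'
    print(parsel.Selector(text=html).css('.price::text').get())  # 19.99

    data = json.loads('{"offers": [{"price": 19.99}]}')
    print(str(pyjq.first('.offers[0].price', data)))             # 19.99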

setup.cfg

@@ -16,7 +16,7 @@ setup_requires =
     setuptools_scm
 install_requires=
     PyYAML~=6.0
-    httpx~=0.23.0
+    httpx[http2]~=0.23.0
     parsel~=1.6.0
     pyjq~=2.6.0
     prometheus-client~=0.15.0
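
The [http2] extra is what pulls in httpx's optional h2 dependency; without it, constructing a client with http2=True raises an ImportError. A quick way to check which protocol was negotiated (the URL is illustrative):

    import httpx

    with httpx.Client(http2=True) as client:
        response = client.get('https://www.example.com')
        # 'HTTP/2' when the server negotiates it, otherwise 'HTTP/1.1'
        print(response.http_version)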