add logging

This commit is contained in:
grey-cat-1908 2022-03-29 11:59:20 +03:00
parent 8e6cbd711c
commit e17a8bf0b0
7 changed files with 186 additions and 14 deletions

View file

@@ -45,7 +45,6 @@ extensions = [
"attributable"
]
autodoc_default_options = {
'members': True,
'show-inheritance': True

View file

@@ -1,17 +1,20 @@
# Copyright MelisaDev 2022 - Present
# Full MIT License can be found in `LICENSE.txt` at the project root.
import logging
import asyncio
from typing import Dict, List, Union, Any
from .models import User, Guild
from .models.app import Shard
from .utils import Snowflake, APIModelBase
from .utils.types import Coro
from .core.http import HTTPClient
from .core.gateway import GatewayBotInfo
from .models.guild.channel import Channel, ChannelType, channel_types_for_converting
from .utils.logging import init_logging
import asyncio
from typing import Dict, List, Union, Any
_logger = logging.getLogger("melisa")
class Client:
@@ -22,15 +25,21 @@ class Client:
Parameters
----------
token : :class:`str`
token: :class:`str`
The token to log in (you can find it in the developer portal)
intents : :class:`~melisa.Intents`
intents: :class:`~melisa.Intents`
The Discord Intents values.
activity : :class:`~models.user.presence.BotActivity`
activity: :class:`~models.user.presence.BotActivity`
The Activity to set (on connecting)
status : :class:`str`
status: :class:`str`
The Status to set (on connecting).
Can be generated using :class:`~models.user.presence.StatusType`
logs: :class:`Union[None, int, str, Dict[str, Any]]`
The hint for configuring logging.
This can be `None` to disable logging automatically.
If you pass a :class:`str` or an :class:`int`, it is interpreted as
the global logging level to use, and should match one of **DEBUG**,
**INFO**, **WARNING**, **ERROR** or **CRITICAL**, if :class:`str`.
Attributes
----------
@@ -42,7 +51,15 @@ class Client:
Bot's shards.
"""
def __init__(self, token: str, intents, *, activity=None, status: str = None):
def __init__(
self,
token: str,
intents,
*,
activity=None,
status: str = None,
logs: Union[None, int, str, Dict[str, Any]] = "INFO",
):
self.shards: Dict[int, Shard] = {}
self.http: HTTPClient = HTTPClient(token)
self._events: Dict[str, Coro] = {}
@@ -64,6 +81,8 @@ class Client:
APIModelBase.set_client(self)
init_logging(logs)
async def _get_gateway(self):
"""Get Gateway information"""
return GatewayBotInfo.from_dict(await self.http.get("gateway/bot"))
@@ -80,6 +99,7 @@ class Client:
raise TypeError(f"<{callback.__qualname__}> must be a coroutine function")
self._events[callback.__qualname__] = callback
_logger.debug(f"Listener {callback.__qualname__} added successfully!")
return self
def run(self) -> None:
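For context, a minimal usage sketch of the new `logs` parameter introduced above, assuming `Client` is re-exported at the top level of the `melisa` package; the token and intents values are placeholders, not part of this diff:

import melisa

# Pass a level name (or int) for a basic stderr setup, a dictConfig-style
# dict for full control, or None to leave logging untouched.
client = melisa.Client(
    "BOT_TOKEN_HERE",   # placeholder token
    0,                  # placeholder intents value
    status="online",
    logs="DEBUG",       # interpreted as the global logging level
)
client.run()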

View file

@@ -2,6 +2,7 @@
# Full MIT License can be found in `LICENSE.txt` at the project root.
import asyncio
import logging
import sys
import zlib
import time
@@ -16,6 +17,8 @@ from ..listeners import listeners
from ..models.user import BotActivity
from ..utils import APIModelBase, json
_logger = logging.getLogger("melisa.gateway")
@dataclass
class GatewayBotInfo(APIModelBase):
@@ -89,12 +92,14 @@ class Gateway:
self.ws = await self.__session.ws_connect(
f"wss://gateway.discord.gg/?v={self.GATEWAY_VERSION}&encoding=json&compress=zlib-stream"
)
if self.session_id is None:
_logger.debug("(Shard %s) Starting...", self.shard_id)
await self.send_identify()
self.loop.create_task(self.receive())
await self.check_heartbeating()
else:
_logger.debug("(Shard %s) Resumed.", self.shard_id)
await self.resume()
async def check_heartbeating(self):
@@ -102,10 +107,15 @@ class Gateway:
await asyncio.sleep(20)
if self._last_send + 60.0 < time.perf_counter():
_logger.warning(
"(Shard %s) ack not received. Attempting to reconnect.",
self.shard_id,
)
await self.ws.close(code=4000)
await self.handle_close(4000)
async def send(self, payload: str) -> None:
_logger.debug("(Shard %s) Sending payload: %s", self.shard_id, payload)
await self.ws.send_str(payload)
async def parse_websocket_message(self, msg):
@@ -125,8 +135,18 @@
async def handle_data(self, data):
"""Handles received data and process it"""
_logger.debug(
"(Shard %s) Data with %s opcode received", self.shard_id, data["op"]
)
if data["op"] == self.DISPATCH:
self.sequence = int(data["s"])
_logger.debug(
"(Shard %s) Set sequence number to %s", self.shard_id, data["s"]
)
event_type = data["t"].lower()
event_to_call = self.listeners.get(event_type)
@@ -135,12 +155,26 @@
ensure_future(event_to_call(self.client, self, data["d"]))
elif data["op"] == self.INVALID_SESSION:
_logger.debug(
"(Shard %s) Invalid session, attempting to reconnect", self.shard_id
)
await self.ws.close(code=4000)
await self.handle_close(4000)
elif data["op"] == self.HELLO:
await self.send_hello(data)
elif data["op"] == self.HEARTBEAT_ACK:
_logger.debug(
"(Shard %s) received heartbeat ack",
self.shard_id,
)
self.latency = time.perf_counter() - self._last_send
elif data["op"] == self.RECONNECT:
_logger.debug(
"(Shard %s) Requested to reconnect to Discord. "
"Closing session and attempting to resume",
self.shard_id,
)
await self.close(1012)
async def receive(self) -> None:
"""Receives and parses received data"""
@@ -155,8 +189,10 @@
raise RuntimeError
close_code = self.ws.close_code
if close_code is None:
return
await self.handle_close(close_code)
async def handle_close(self, code: int) -> None:
@@ -169,10 +205,18 @@
if err:
raise err
_logger.info(
"(Shard %s) disconnected from the Discord Gateway without any errors. Reconnecting...",
self.shard_id,
)
await self.connect()
async def send_heartbeat(self, interval: float) -> None:
if not self.ws.closed:
_logger.debug(
"(Shard %s) sending heartbeat in %sms", self.shard_id, interval
)
await self.send(self.opcode(self.HEARTBEAT, self.sequence))
self._last_send = time.perf_counter()
await asyncio.sleep(interval)
@@ -181,6 +225,7 @@ class Gateway:
async def close(self, code: int = 4000) -> None:
if self.ws:
await self.ws.close(code=code)
self._buffer.clear()
async def send_hello(self, data: Dict) -> None:
@@ -188,7 +233,22 @@
await asyncio.sleep((interval - 2000) / 1000)
self.loop.create_task(self.send_heartbeat(interval))
async def send_identify(self) -> None:
async def send_identify(self, resume: bool = False) -> None:
if resume:
_logger.debug("(Shard %s) Resuming connection with Discord", self.shard_id)
await self.send(
self.opcode(
self.RESUME,
{
"token": self.client._token,
"session_id": self.session_id,
"seq": self.sequence,
},
)
)
return
await self.send(self.opcode(self.IDENTIFY, self.auth))
async def resume(self) -> None:
@@ -216,6 +276,7 @@
return data
async def update_presence(self, data: dict):
_logger.debug("[Shard %s] Updating presence...", self.shard_id)
await self.send(self.opcode(self.PRESENCE_UPDATE, data))
@staticmethod
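Since the gateway and HTTP client now log through child loggers of "melisa" ("melisa.gateway" and "melisa.http"), their verbosity can be tuned independently of whatever init_logging sets up; a small sketch using only the standard library:

import logging

# Keep the library quiet overall, but surface the new per-shard
# gateway debug messages added in this commit.
logging.getLogger("melisa").setLevel(logging.WARNING)
logging.getLogger("melisa.gateway").setLevel(logging.DEBUG)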

View file

@@ -4,6 +4,7 @@
from __future__ import annotations
import asyncio
import logging
from typing import Dict, Optional, Any
from aiohttp import ClientSession, ClientResponse
@@ -22,6 +23,8 @@ from melisa.exceptions import (
from .ratelimiter import RateLimiter
from ..utils import remove_none
_logger = logging.getLogger("melisa.http")
class HTTPClient:
API_VERSION = 9
@@ -76,6 +79,8 @@ class HTTPClient:
if ttl == 0:
raise ServerError(f"Maximum amount of retries for `{endpoint}`.")
_logger.debug(f"Send {method} request to the {endpoint}")
await self.__rate_limiter.wait_until_not_ratelimited(endpoint, method)
url = f"{self.url}/{endpoint}"
@@ -105,12 +110,19 @@
) -> Optional[Dict]:
"""Handle responses from the Discord API."""
_logger.debug(f"Received response for the {endpoint} ({await res.text()})")
self.__rate_limiter.save_response_bucket(endpoint, method, res.headers)
if res.ok:
if res.status == 204:
_logger.debug("Request has been sent successfully.")
return
_logger.debug(
"Request has been sent successfully and returned json response."
)
return await res.json()
exception = self.__http_exceptions.get(res.status)
@@ -119,14 +131,30 @@
if isinstance(exception, RateLimitError):
timeout = (await res.json()).get("retry_after", 40)
_logger.exception(
f"You are being ratelimited: {res.reason}."
f" The scope is {res.headers.get('X-RateLimit-Scope')}."
f" I will retry in {timeout} seconds"
)
await asyncio.sleep(timeout)
return await self.__send(method, endpoint, **kwargs)
_logger.error(
f" HTTP exception occurred while trying to send "
f"a request to {endpoint}. ({res.status}, {res.reason})"
)
exception.__init__(res.reason)
raise exception
retry_in = 1 + (self.max_ttl - _ttl) * 2
_logger.debug(
"Discord side error occurred (hahahhaha, discord, fix yourself)."
f" Status-Code: {res.status}. I will retry sending in {retry_in}s."
)
await asyncio.sleep(retry_in)
return await self.__send(method, endpoint, _ttl=_ttl - 1, **kwargs)
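The server-error branch above retries with a linearly growing delay, retry_in = 1 + (max_ttl - _ttl) * 2; a small sketch of the resulting schedule, assuming a hypothetical max_ttl of 5 and that the first attempt starts with _ttl == max_ttl (neither is shown in this diff):

max_ttl = 5  # hypothetical value, for illustration only

for _ttl in range(max_ttl, 0, -1):
    retry_in = 1 + (max_ttl - _ttl) * 2
    print(f"_ttl={_ttl}: retry in {retry_in}s")
# -> 1s, 3s, 5s, 7s, 9s before the ttl reaches 0 and ServerError is raised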

View file

@@ -3,11 +3,14 @@
from __future__ import annotations
import logging
from asyncio import sleep
from dataclasses import dataclass
from time import time
from typing import Dict, Tuple, Any
_logger = logging.getLogger("melisa.http")
@dataclass
class RateLimitBucket:
@@ -16,7 +19,7 @@ class RateLimitBucket:
limit: int
remaining: int
reset: float
reset_after_timestamp: float
reset_after: float
since_timestamp: float
@@ -41,10 +44,16 @@ class RateLimiter:
limit=int(header["X-RateLimit-Limit"]),
remaining=int(header["X-RateLimit-Remaining"]),
reset=float(header["X-RateLimit-Reset"]),
reset_after_timestamp=float(header["X-RateLimit-Reset-After"]),
reset_after=float(header["X-RateLimit-Reset-After"]),
since_timestamp=time(),
)
_logger.info(
"Rate limit bucket detected: %s - %r.",
ratelimit_bucket_id,
self.buckets[ratelimit_bucket_id],
)
async def wait_until_not_ratelimited(self, endpoint: str, method: str):
bucket_id = self.bucket_map.get((endpoint, method))
@@ -55,4 +64,11 @@ class RateLimiter:
if bucket.remaining == 0:
sleep_time = time() - bucket.since_timestamp + bucket.reset_after
_logger.info(
"Waiting until rate limit for bucket %s is over.", sleep_time, bucket_id
)
await sleep(sleep_time)
_logger.info("Message sent. Bucket %s rate limit ended.", bucket_id)

View file

@@ -485,7 +485,6 @@ class TextChannel(Channel):
count += 1
if count == 100:
print("abobatelecom")
await self.bulk_delete_messages(message_ids, reason=reason)
message_ids = []
count = 0
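The purge loop above batches IDs because Discord's bulk message delete endpoint accepts at most 100 messages per call; a generic chunking sketch of the same idea (the helper name is illustrative, not part of the library):

def chunk_message_ids(message_ids, size=100):
    # Discord's bulk delete takes 2-100 message IDs per request.
    for i in range(0, len(message_ids), size):
        yield message_ids[i:i + size]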

melisa/utils/logging.py (new file, 49 additions)
View file

@@ -0,0 +1,49 @@
# Copyright MelisaDev 2022 - Present
# Full MIT License can be found in `LICENSE.txt` at the project root.
import logging
import logging.config
import sys
from typing import Union, Dict, Any
def init_logging(
flavor: Union[None, str, int, Dict[str, Any]],
) -> None:
"""Attempt to initialize logging for the user.
If any handlers already exist, this is ignored entirely. This ensures the
user can use any existing logging configuration without us interfering.
You can manually disable logging by passing `None` as the `flavor` parameter.
Parameters
----------
flavor: :class:`Union[None, str, int, Dict[str, Any]]`
This can be `None` to disable logging automatically.
If you pass a :class:`str` or an :class:`int`, it is interpreted as
the global logging level to use, and should match one of **DEBUG**,
**INFO**, **WARNING**, **ERROR** or **CRITICAL**, if :class:`str`.
"""
# This method was found in the hikari source code and modified, thank you, hikari devs
if len(logging.root.handlers) != 0 or flavor is None:
return
if isinstance(flavor, dict):
logging.config.dictConfig(flavor)
if flavor.get("handlers"):
return
flavor = None
logging.logThreads = False
logging.logProcesses = False
logging.basicConfig(
level=flavor,
format="[%(asctime)s] %(name)s: %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
stream=sys.stderr,
)
logging.captureWarnings(True)
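A short usage sketch of the new helper for each supported flavor (the import path matches the new module; the dict example is a minimal dictConfig assumption, and the calls below are alternatives, not a sequence):

from melisa.utils.logging import init_logging

# A str or int flavor becomes the global level of a basic stderr setup;
# with the format above, records render like
# "[2022-03-29 11:59:20] melisa.gateway: (Shard 0) Starting..."
init_logging("DEBUG")

# A dict flavor is handed to logging.config.dictConfig; if it defines
# its own handlers, init_logging stops there and changes nothing else.
init_logging({
    "version": 1,
    "handlers": {"console": {"class": "logging.StreamHandler"}},
    "root": {"handlers": ["console"], "level": "INFO"},
})

# None, or any pre-existing handler on the root logger, disables the helper.
init_logging(None)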