Use nostr as cache system

This commit is contained in:
koalasat
2024-07-01 18:00:17 +02:00
parent a6573ba416
commit e4c87feab6
10 changed files with 260 additions and 15 deletions

View File

@ -8,7 +8,7 @@ from django.utils import timezone
from api.lightning.node import LNNode
from api.models import Currency, LNPayment, MarketTick, OnchainPayment, Order
from api.tasks import send_devfund_donation, send_notification
from api.tasks import send_devfund_donation, send_notification, send_order_nostr_event
from api.utils import get_minning_fee, validate_onchain_address, location_country
from chat.models import Message
@ -1208,6 +1208,8 @@ class Logics:
order.save() # update all fields
send_order_nostr_event.delay(order_id=order.id)
order.log(f"Order({order.id},{str(order)}) is public in the order book")
return

View File

@ -0,0 +1,19 @@
# Generated by Django 5.0.6 on 2024-07-01 12:03
import api.models.order
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 5.0.6, 2024-07-01).

    Alters Order.reference so its default is produced by
    api.models.order.custom_uuid instead of the previous default.
    Only the field definition changes; no data migration is performed.
    """

    # Must run after the notification model migration.
    dependencies = [
        ('api', '0047_notification'),
    ]
    operations = [
        migrations.AlterField(
            model_name='order',
            name='reference',
            # editable=False keeps the reference out of admin/model forms.
            field=models.UUIDField(default=api.models.order.custom_uuid, editable=False),
        ),
    ]

46
api/nostr.py Normal file
View File

@ -0,0 +1,46 @@
import time
import pygeohash
from nostr_sdk import Keys, Client, EventBuilder, NostrSigner, Filter
from api.models import Order
from decouple import config
class Nostr:
    """Simple nostr events manager to be used as a cache system for clients."""

    async def send_order_event(self, order):
        """Build a kind-38383 (addressable order) event for *order* and
        publish it to the coordinator relay.

        Parameters
        ----------
        order : api.models.Order
            The order whose public state is mirrored to nostr.

        NOTE(review): the original comment said "Initialize with coordinator
        Keys", but Keys.generate() creates a fresh random keypair on every
        call, so each event has a different author pubkey and clients cannot
        correlate or replace events from one coordinator. Load the
        coordinator's persistent key from config instead — confirm intent.
        """
        keys = Keys.generate()
        signer = NostrSigner.keys(keys)
        client = Client(signer)

        # Add relays and connect (port 888 matches the rnostr service in
        # docker-compose).
        await client.add_relays(["ws://localhost:888"])
        await client.connect()

        # Hoisted out of the tag list: nesting "..." inside an f-string
        # delimited by the same quote is a SyntaxError before Python 3.12.
        hostname = config("HOST_NAME")
        coordinator_alias = config("COORDINATOR_ALIAS", cast=str)

        # NIP-01 requires every tag element to be a string, so non-string
        # order fields are coerced with str().
        tags = [
            ["d", str(order.id)],
            ["name", order.maker.robot_name],
            ["k", order.type.lower()],
            ["f", order.currency],
            ["s", Order.Status(order.status).label],
            ["amt", str(order.last_satoshis)],
            ["fa", str(order.amount)],
            # One tag element per accepted payment method (was a nested list).
            ["pm", *order.payment_method.split(" ")],
            ["premium", str(order.premium_percentile)],
            ["source", f"{hostname}/{coordinator_alias}/order/{order.id}"],
            ["expiration", str(order.expires_at.timestamp())],
            ["y", "robosats"],
            # Fixed: a missing trailing comma here made the next line a
            # tuple subscript of this list (TypeError at runtime).
            ["coordinator", coordinator_alias],
            ["z", "order"],
            ["n", str(order.network)],
            ["layer", "lightning"],
            ["g", pygeohash.encode(order.latitude, order.longitude)],
            ["bond", str(order.bond)],
        ]

        # NOTE(review): nostr_sdk's EventBuilder may expect Kind/Tag objects
        # rather than a raw int and list-of-lists — verify against the
        # nostr_sdk 0.32 API.
        event = EventBuilder(38383, "", tags).to_event(keys)
        output = await client.send_event(event)
        print(f"Nostr event sent: {output}")

View File

@ -1,3 +1,4 @@
import asyncio
from celery import shared_task
from celery.exceptions import SoftTimeLimitExceeded
@ -251,6 +252,19 @@ def cache_market():
return
# Fixed: the task name was "" — it now matches the function name, consistent
# with the sibling task registered as name="send_notification".
@shared_task(name="send_order_nostr_event", ignore_result=True, time_limit=120)
def send_order_nostr_event(order_id=None):
    """Publish the order's public state to the coordinator nostr relay.

    No-op when *order_id* is falsy. Imports are deferred to avoid circular
    imports between api.tasks and api.models/api.nostr at module load time.
    """
    if order_id:
        from api.models import Order
        from api.nostr import Nostr

        order = Order.objects.get(id=order_id)
        nostr = Nostr()
        # asyncio.run() creates and tears down a fresh event loop per call.
        # get_event_loop() is deprecated outside a running loop and raises
        # in non-main worker threads on Python 3.12+.
        asyncio.run(nostr.send_order_event(order))
@shared_task(name="send_notification", ignore_result=True, time_limit=120)
def send_notification(order_id=None, chat_message_id=None, message=None):
if order_id:

View File

@ -163,6 +163,7 @@ services:
ports:
- 8000:8000 # dev frontend build
- 12596:12596 # umbrel frontend
- 888:888 # nostr
lnd:
build: ./docker/lnd
@ -226,6 +227,14 @@ services:
volumes:
- ./node/db:/var/lib/postgresql/data
rnostr:
build: https://github.com/rnostr/rnostr.git
container_name: rnostr-dev
restart: unless-stopped
volumes:
- ./nodeapp/rnostr/config/rnostr.toml:/rnostr/config/rnostr.toml:ro
network_mode: service:tor
# # Postgresql for CLN
# postgres-cln:
# image: postgres:14.2-alpine

View File

@ -5,17 +5,17 @@
# docker-compose -f docker-tests.yml --env-file tests/compose.env down --volumes
# docker exec -it btc bitcoin-cli -chain=regtest -rpcpassword=test -rpcuser=test createwallet default
# docker exec -it btc bitcoin-cli -chain=regtest -rpcpassword=test -rpcuser=test -generate 101
# docker exec -it btc-test bitcoin-cli -chain=regtest -rpcpassword=test -rpcuser=test createwallet default
# docker exec -it btc-test bitcoin-cli -chain=regtest -rpcpassword=test -rpcuser=test -generate 101
# docker exec -it coordinator-LND lncli --network=regtest getinfo
# docker exec -it robot-LND lncli --network=regtest --rpcserver localhost:10010 getinfo
# docker exec -it coordinator-LND-test lncli --network=regtest getinfo
# docker exec -it robot-LND-test lncli --network=regtest --rpcserver localhost:10010 getinfo
version: '3.9'
services:
bitcoind:
image: ruimarinho/bitcoin-core:${BITCOIND_VERSION:-24.0.1}-alpine
container_name: btc
container_name: btc-test
restart: always
ports:
- "8000:8000"
@ -50,7 +50,7 @@ services:
coordinator-LND:
image: lightninglabs/lnd:${LND_VERSION:-v0.17.0-beta}
container_name: coordinator-LND
container_name: coordinator-LND-test
restart: always
volumes:
- bitcoin:/root/.bitcoin/
@ -83,7 +83,7 @@ services:
coordinator-CLN:
image: elementsproject/lightningd:${CLN_VERSION:-v24.05}
restart: always
container_name: coordinator-CLN
container_name: coordinator-CLN-test
environment:
LIGHTNINGD_NETWORK: 'regtest'
volumes:
@ -97,7 +97,7 @@ services:
robot-LND:
image: lightninglabs/lnd:${LND_VERSION:-v0.17.0-beta}
container_name: robot-LND
container_name: robot-LND-test
restart: always
volumes:
- bitcoin:/root/.bitcoin/
@ -129,7 +129,7 @@ services:
redis:
image: redis:${REDIS_VERSION:-7.2.1}-alpine
container_name: redis
container_name: redis-test
restart: always
volumes:
- redisdata:/data
@ -141,7 +141,7 @@ services:
args:
DEVELOPMENT: True
image: backend-image
container_name: coordinator
container_name: coordinator-test
restart: always
environment:
DEVELOPMENT: True
@ -171,7 +171,7 @@ services:
postgres:
image: postgres:${POSTGRES_VERSION:-14.2}-alpine
container_name: sql
container_name: sql-test
restart: always
environment:
POSTGRES_PASSWORD: 'example'
@ -179,10 +179,18 @@ services:
POSTGRES_DB: 'postgres'
network_mode: service:bitcoind
rnostr:
build: https://github.com/rnostr/rnostr.git
container_name: rnostr-test
restart: unless-stopped
volumes:
- ./nodeapp/rnostr/config/rnostr.toml:/rnostr/config/rnostr.toml:ro
network_mode: service:bitcoind
# celery-worker:
# image: backend-image
# pull_policy: never
# container_name: celery-worker
# container_name: celery-worker-test
# restart: always
# environment:
# DEVELOPMENT: True

View File

@ -0,0 +1,145 @@
# Configuration
# All duration format reference https://docs.rs/duration-str/latest/duration_str/
#
# config relay information
[information]
name = "rnostr"
description = "A high-performance and scalable nostr relay written in Rust."
software = "https://github.com/rnostr/rnostr"
# pubkey = ""
# contact = ""
# config data path
[data]
# the data path (restart required)
# the events db path is $path/events
path = "./data"
# Query filter timeout time, default no timeout.
db_query_timeout = "100ms"
# config network
[network]
# Interface to listen on. Use 0.0.0.0 to listen on all interfaces (restart required)
host = "127.0.0.1"
# Listen port (restart required)
port = 888
# real ip header (default empty)
# ie: cf-connecting-ip, x-real-ip, x-forwarded-for
# real_ip_header = "x-forwarded-for"
# redirect to other site when user access the http index page
# index_redirect_to = "https://example.com"
# heartbeat timeout (default 120 seconds, must bigger than heartbeat interval)
# How long before lack of client response causes a timeout
# heartbeat_timeout = "2m"
# heartbeat interval (default 60 seconds)
# How often heartbeat pings are sent
# heartbeat_interval = "1m"
# config thread (restart required)
[thread]
# number of http server threads (restart required)
# default 0 will use the num of cpus
# http = 0
# number of read event threads (restart required)
# default 0 will use the num of cpus
# reader = 0
[limitation]
# this is the maximum number of bytes for incoming JSON. default 512K
max_message_length = 524288
# total number of subscriptions that may be active on a single websocket connection to this relay. default 20
max_subscriptions = 1
# maximum number of filter values in each subscription. default 10
max_filters = 10
# the relay server will clamp each filter's limit value to this number. This means the client won't be able to get more than this number of events from a single subscription filter. default 300
max_limit = 300
# maximum length of subscription id as a string. default 100
max_subid_length = 100
# for authors and ids filters which are to match against a hex prefix, you must provide at least this many hex digits in the prefix. default 10
min_prefix = 10
# in any event, this is the maximum number of elements in the tags list. default 5000
max_event_tags = 15
# Events older than this will be rejected. default 3 years
max_event_time_older_than_now = 94608000
# Events newer than this will be rejected. default 15 minutes
max_event_time_newer_than_now = 900
# Metrics extension, get the metrics data from https://example.com/metrics?auth=auth_key
[metrics]
enabled = false
# change the auth key
auth = "auth_key"
# Auth extension
[auth]
enabled = false
# # Authenticate the command 'REQ' get event, subscribe filter
# [auth.req]
# # only the list IP are allowed to req
# ip_whitelist = ["127.0.0.1"]
# # only the list IP are denied to req
# ip_blacklist = ["127.0.0.1"]
# # Restrict on nip42 verified pubkey, so client needs to implement nip42 and authenticate success
# pubkey_whitelist = ["xxxxxx"]
# pubkey_blacklist = ["xxxx"]
# # Authenticate the command 'EVENT' write event
# [auth.event]
# ip_whitelist = ["127.0.0.1"]
# ip_blacklist = ["127.0.0.1"]
# # Restrict on nip42 verified pubkey, so client needs to implement nip42 and authenticate success
# pubkey_whitelist = ["xxxxxx"]
# pubkey_blacklist = ["xxxx"]
# # Restrict on event author pubkey, No need nip42 authentication
# event_pubkey_whitelist = ["xxxxxx"]
# event_pubkey_blacklist = ["xxxx"]
# IP Rate limiter extension
[rate_limiter]
enabled = false
# # interval at second for clearing invalid data to free up memory.
# # 0 will be converted to default 60 seconds
# clear_interval = "60s"
# # rate limiter ruler list when write event per user client IP
# [[rate_limiter.event]]
# # name of rate limiter, used by metrics
# name = "all"
# # description will notice the user when rate limiter exceeded
# description = "allow only ten events per minute"
# period = "1m"
# limit = 10
# # only limit for kinds
# # support kind list: [1, 2, 3]
# # kind ranges included(start) to excluded(end): [[0, 10000], [30000, 40000]]
# # mixed: [1, 2, [30000, 40000]]
# kinds = [[0, 40000]]
# # skip when ip in whitelist
# ip_whitelist = ["127.0.0.1"]
# [[rate_limiter.event]]
# name = "kind 10000"
# description = "allow only five write events per minute when event kind between 0 to 10000"
# period = "60s"
# limit = 5
# kinds = [[0, 10000]]
# NIP-45 Count extension
# use carefully. see README.md#count
[count]
enabled = false
# NIP-50 Search extension
# use carefully. see README.md#search
[search]
enabled = false

View File

@ -28,3 +28,4 @@ drf-spectacular==0.27.2
drf-spectacular-sidecar==2024.4.1
django-cors-headers==4.3.1
base91==1.0.1
nostr-sdk==0.32.2

View File

@ -16,7 +16,7 @@ from tests.utils.pgp import sign_message
from tests.utils.trade import Trade
from api.admin import OrderAdmin
from nostr_sdk import Kind, Filter
def read_file(file_path):
"""

View File

@ -5,7 +5,7 @@ from django.urls import reverse
from api.management.commands.clean_orders import Command as CleanOrders
from api.management.commands.follow_invoices import Command as FollowInvoices
from api.models import Order
from api.tasks import follow_send_payment, send_notification
from api.tasks import follow_send_payment, send_notification, send_order_nostr_event
from tests.utils.node import (
add_invoice,
create_address,
@ -156,6 +156,7 @@ class Trade:
wait_nodes_sync()
@patch("api.tasks.send_notification.delay", send_notification)
@patch("api.tasks.send_order_nostr_event.delay", send_order_nostr_event)
def publish_order(self):
# Maker's first order fetch. Should trigger maker bond hold invoice generation.
self.get_order()