Use nostr as a cache system

commit ce48b8b7be
parent 5c2165fffa
Author: koalasat
Date:   2024-07-24 16:48:02 +02:00

9 changed files with 210 additions and 1 deletion


@@ -143,6 +143,17 @@ services:
      - redis
    network_mode: service:tor
  strfry:
    build: ./strfry
    container_name: strfry${SUFFIX}
    restart: always
    volumes:
      - ./strfry/sync.sh:/app/sync.sh
      - ${STRFRY_CONF}:/app/strfry.conf
      - ${STRFRY_DATA}/db:/app/strfry-db
      - ${STRFRY_DATA}/log/:/var/log/
    network_mode: service:tor
  # Example simple backup service (copy/paste to attached storage locations)
  # backup:
  #   build: ./backup
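Because the strfry container joins the tor service's network namespace (network_mode: service:tor), the relay's websocket port (7777 in strfry.conf below) is only reachable from inside that namespace. A minimal sanity check, assuming the Alpine base image's busybox wget and the container name from the compose file above:

docker exec strfry${SUFFIX} wget -qO- --header 'Accept: application/nostr+json' http://127.0.0.1:7777

This should return the NIP-11 relay information document configured in strfry.conf below.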


@@ -175,3 +175,6 @@ SLASHED_BOND_REWARD_SPLIT = 0.5
# Username for HTLCs escrows
ESCROW_USERNAME = 'admin'
# Social
NOSTR_NSEC = 'nsec1vxhs2zc4kqe0dhz4z2gfrdyjsrwf8pg3neeqx6w4nl8djfzdp0dqwd6rxh'


@@ -28,6 +28,9 @@ POSTGRES_PASSWORD='example'
NGINX_CONFD='./nginx/tn.conf.d'
WELLKNOWN='./nginx/tn.well-known'
STRFRY_CONF='./env-sample/lndtn/strfry.conf'
STRFRY_DATA='/custom_path/testnet/strfry'
# Port and number of HTTP server workers for the robosats backend
WEB_LOCAL_PORT=8001
GUNICORN_WORKERS=2
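The strfry service above bind-mounts ${STRFRY_DATA}/db and ${STRFRY_DATA}/log from the host. Docker creates missing bind-mount directories as root, but creating them up front keeps ownership predictable; a minimal sketch using the sample testnet path from this file:

mkdir -p /custom_path/testnet/strfry/db /custom_path/testnet/strfry/log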


@@ -178,3 +178,6 @@ SLASHED_BOND_REWARD_SPLIT = 0.5
# Username for HTLCs escrows
ESCROW_USERNAME = 'admin'
# Social
NOSTR_NSEC = 'nsec1vxhs2zc4kqe0dhz4z2gfrdyjsrwf8pg3neeqx6w4nl8djfzdp0dqwd6rxh'


@@ -0,0 +1,138 @@
##
## Default strfry config
##

# Directory that contains the strfry LMDB database (restart required)
db = "/app/strfry-db/"

dbParams {
    # Maximum number of threads/processes that can simultaneously have LMDB transactions open (restart required)
    maxreaders = 256

    # Size of mmap() to use when loading LMDB (default is 10TB, does *not* correspond to disk-space used) (restart required)
    mapsize = 10995116277760

    # Disables read-ahead when accessing the LMDB mapping. Reduces IO activity when DB size is larger than RAM. (restart required)
    noReadAhead = false
}

events {
    # Maximum size of normalised JSON, in bytes
    maxEventSize = 65536

    # Events newer than this will be rejected
    rejectEventsNewerThanSeconds = 900

    # Events older than this will be rejected
    rejectEventsOlderThanSeconds = 94608000

    # Ephemeral events older than this will be rejected
    rejectEphemeralEventsOlderThanSeconds = 60

    # Ephemeral events will be deleted from the DB when older than this
    ephemeralEventsLifetimeSeconds = 300

    # Maximum number of tags allowed
    maxNumTags = 2000

    # Maximum size for tag values, in bytes
    maxTagValSize = 1024
}

relay {
    # Interface to listen on. Use 0.0.0.0 to listen on all interfaces (restart required)
    bind = "0.0.0.0"

    # Port to open for the nostr websocket protocol (restart required)
    port = 7777

    # Set OS-limit on maximum number of open files/sockets (if 0, don't attempt to set) (restart required)
    nofiles = 1000000

    # HTTP header that contains the client's real IP, before reverse proxying (ie x-real-ip) (MUST be all lower-case)
    realIpHeader = ""

    info {
        # NIP-11: Name of this server. Short/descriptive (< 30 characters)
        name = "Robosats"

        # NIP-11: Detailed information about relay, free-form
        description = "Federation cache system."

        # NIP-11: Administrative nostr pubkey, for contact purposes
        pubkey = ""

        # NIP-11: Alternative administrative contact (email, website, etc)
        contact = ""
    }

    # Maximum accepted incoming websocket frame size (should be larger than max event) (restart required)
    maxWebsocketPayloadSize = 131072

    # Websocket-level PING message frequency (should be less than any reverse proxy idle timeouts) (restart required)
    autoPingSeconds = 55

    # If TCP keep-alive should be enabled (detect dropped connections to upstream reverse proxy)
    enableTcpKeepalive = false

    # How much uninterrupted CPU time a REQ query should get during its DB scan
    queryTimesliceBudgetMicroseconds = 10000

    # Maximum records that can be returned per filter
    maxFilterLimit = 500

    # Maximum number of subscriptions (concurrent REQs) a connection can have open at any time
    maxSubsPerConnection = 3

    writePolicy {
        # If non-empty, path to an executable script that implements the writePolicy plugin logic
        plugin = ""
    }

    compression {
        # Use permessage-deflate compression if supported by client. Reduces bandwidth, but slight increase in CPU (restart required)
        enabled = true

        # Maintain a sliding window buffer for each connection. Improves compression, but uses more memory (restart required)
        slidingWindow = false
    }

    logging {
        # Dump all incoming messages
        dumpInAll = false

        # Dump all incoming EVENT messages
        dumpInEvents = false

        # Dump all incoming REQ/CLOSE messages
        dumpInReqs = false

        # Log performance metrics for initial REQ database scans
        dbScanPerf = false

        # Log reason for invalid event rejection? Can be disabled to silence excessive logging
        invalidEvents = true
    }

    numThreads {
        # Ingester threads: route incoming requests, validate events/sigs (restart required)
        ingester = 3

        # reqWorker threads: Handle initial DB scan for events (restart required)
        reqWorker = 3

        # reqMonitor threads: Handle filtering of new events (restart required)
        reqMonitor = 3

        # negentropy threads: Handle negentropy protocol messages (restart required)
        negentropy = 2
    }

    negentropy {
        # Support negentropy protocol messages
        enabled = true

        # Maximum records that sync will process before returning an error
        maxSyncEvents = 1000000
    }
}
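Once the sync cron job below has pulled events in, the cache contents can be inspected with strfry's scan subcommand using the same filter that sync.sh replicates; a sketch, with the container name and paths taken from the compose file above:

docker exec strfry${SUFFIX} /app/strfry --config /app/strfry.conf scan '{"kinds":[38383]}' | head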

compose/strfry/Dockerfile Normal file

@@ -0,0 +1,14 @@
FROM dockurr/strfry:0.9.6
RUN apk add --no-cache dcron torsocks
RUN echo "TorAddress 127.0.0.1" >> /etc/tor/torsocks.conf
RUN echo "TorPort 9050" >> /etc/tor/torsocks.conf
ADD . /app
COPY entrypoint.sh /etc/strfry/entrypoint.sh
RUN chmod +x /etc/strfry/entrypoint.sh
ENTRYPOINT ["/etc/strfry/entrypoint.sh"]
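With this Dockerfile in place, the new service can be built and started on its own; a sketch, assuming the commands are run next to the compose file above (where the service is named strfry):

docker compose build strfry
docker compose up -d strfry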

compose/strfry/crontab Normal file

@@ -0,0 +1,24 @@
# Edit this file to introduce tasks to be run by cron.
#
# Each task to run has to be defined through a single line
# indicating with different fields when the task will be run
# and what command to run for the task
#
# To define the time you can provide concrete values for
# minute (m), hour (h), day of month (dom), month (mon),
# and day of week (dow) or use '*' in these fields (for 'any').
#
# Notice that tasks will be started based on the cron's system
# daemon's notion of time and timezones.
#
# Output of the crontab jobs (including errors) is sent through
# email to the user the crontab file belongs to (unless redirected).
#
# For example, you can run a backup of all your user accounts
# at 5 a.m every week with:
# 0 5 * * 1 tar -zcf /var/backups/home.tgz /home/
#
# For more information see the manual pages of crontab(5) and cron(8)
#
# m h dom mon dow command
*/1 * * * * torsocks /app/sync.sh
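The single active entry runs sync.sh through torsocks every minute; sync.sh appends its output to /var/log/cron.log, which the compose file maps into ${STRFRY_DATA}/log/ on the host. To watch it (a sketch, container name as above):

docker exec strfry${SUFFIX} tail -f /var/log/cron.log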


@@ -0,0 +1,5 @@
#!/bin/sh
crontab /app/crontab
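# Run the cron daemon in the background, then keep /app/strfry.sh in the foreground
# (assumed here to be the relay launcher; it is not part of this commit).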
crond -f -l 8 & /app/strfry.sh

compose/strfry/sync.sh Normal file

@@ -0,0 +1,8 @@
#!/bin/sh
filters='{"kinds":[38383]}'
/app/strfry --config /app/strfry.conf sync ws://ngdk7ocdzmz5kzsysa3om6du7ycj2evxp2f2olfkyq37htx3gllwp2yd.onion/nostr --filter "$filters" --dir both >> /var/log/cron.log 2>&1
/app/strfry --config /app/strfry.conf sync ws://satstraoq35jffvkgpfoqld32nzw2siuvowanruindbfojowpwsjdgad.onion/nostr --filter "$filters" --dir both >> /var/log/cron.log 2>&1
/app/strfry --config /app/strfry.conf sync ws://4t4jxmivv6uqej6xzx2jx3fxh75gtt65v3szjoqmc4ugdlhipzdat6yd.onion/nostr --filter "$filters" --dir both >> /var/log/cron.log 2>&1
/app/strfry --config /app/strfry.conf sync ws://mmhaqzuirth5rx7gl24d4773lknltjhik57k7ahec5iefktezv4b3uid.onion/nostr --filter "$filters" --dir both >> /var/log/cron.log 2>&1
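Each line reconciles kind-38383 events with one coordinator's relay over tor, in both directions, using strfry's negentropy-based sync (enabled in strfry.conf above). Adding another coordinator is one more line of the same shape; the onion address below is a placeholder, not a real relay:

/app/strfry --config /app/strfry.conf sync ws://<coordinator-onion-address>/nostr --filter "$filters" --dir both >> /var/log/cron.log 2>&1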