From 6c8f91c1fb7e8b0b85b1629eeec167aa38d2b4c8 Mon Sep 17 00:00:00 2001 From: Mohammad Shafiee Date: Mon, 5 Aug 2024 01:48:35 +0200 Subject: [PATCH] refactor --- go.mod | 71 +- go.sum | 164 ++++- internal/bot/telegram_bot.go | 575 ++++++++++++++++ internal/cache/cache.go | 63 ++ internal/config/config.go | 110 +++ internal/data/user.go | 117 ++++ internal/reader/binary_cache.go | 510 ++++++++++++++ internal/reader/binary_cache_test.go | 235 +++++++ internal/reader/reader.go | 262 +++++++ internal/types/file.go | 45 ++ internal/types/user.go | 10 + internal/utils/hashing.go | 28 + internal/utils/helpers.go | 167 +++++ main.go | 986 +-------------------------- templates/player.html | 203 ++++++ 15 files changed, 2573 insertions(+), 973 deletions(-) create mode 100644 internal/bot/telegram_bot.go create mode 100644 internal/cache/cache.go create mode 100644 internal/config/config.go create mode 100644 internal/data/user.go create mode 100644 internal/reader/binary_cache.go create mode 100644 internal/reader/binary_cache_test.go create mode 100644 internal/reader/reader.go create mode 100644 internal/types/file.go create mode 100644 internal/types/user.go create mode 100644 internal/utils/hashing.go create mode 100644 internal/utils/helpers.go create mode 100644 templates/player.html diff --git a/go.mod b/go.mod index 870c210..f1ee8e4 100644 --- a/go.mod +++ b/go.mod @@ -1,11 +1,76 @@ module webBridgeBot -go 1.20 +go 1.21 + +toolchain go1.22.5 require ( + github.com/celestix/gotgproto v1.0.0-beta18 + github.com/coocood/freecache v1.2.4 github.com/gorilla/mux v1.8.1 github.com/gorilla/websocket v1.5.1 - github.com/zelenin/go-tdlib v0.7.1 + github.com/joho/godotenv v1.5.1 + github.com/kelseyhightower/envconfig v1.4.0 + github.com/spf13/cobra v1.8.1 + github.com/spf13/viper v1.19.0 + go.uber.org/zap v1.27.0 + gopkg.in/natefinch/lumberjack.v2 v2.2.1 +) + +require ( + github.com/AnimeKaizoku/cacher v1.0.1 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/glebarez/go-sqlite v1.21.2 // indirect + github.com/google/uuid v1.4.0 // indirect + github.com/gotd/td v0.106.0 + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jinzhu/inflection v1.0.0 // indirect + github.com/jinzhu/now v1.1.5 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mattn/go-isatty v0.0.17 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect + golang.org/x/text v0.16.0 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde // indirect + modernc.org/libc v1.22.5 // indirect + modernc.org/mathutil v1.5.0 // indirect + modernc.org/memory v1.5.0 // indirect + modernc.org/sqlite v1.23.1 // indirect ) -require golang.org/x/net v0.17.0 // indirect +require ( + github.com/cenkalti/backoff/v4 v4.3.0 // 
indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/glebarez/sqlite v1.10.0 // indirect + github.com/go-faster/errors v0.7.1 // indirect + github.com/go-faster/jx v1.1.0 // indirect + github.com/go-faster/xor v1.0.0 // indirect + github.com/gotd/ige v0.2.2 // indirect + github.com/gotd/neo v0.1.5 // indirect + github.com/klauspost/compress v1.17.9 // indirect + github.com/segmentio/asm v1.2.0 // indirect + go.opentelemetry.io/otel v1.28.0 // indirect + go.opentelemetry.io/otel/trace v1.28.0 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.25.0 // indirect + golang.org/x/net v0.27.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.22.0 // indirect + nhooyr.io/websocket v1.8.11 // indirect + rsc.io/qr v0.2.0 // indirect +) diff --git a/go.sum b/go.sum index d8709b8..7ffae4d 100644 --- a/go.sum +++ b/go.sum @@ -1,8 +1,164 @@ +github.com/AnimeKaizoku/cacher v1.0.1 h1:rDjeDphztR4h234mnUxlOQWyYAB63WdzJB9zBg9HVPg= +github.com/AnimeKaizoku/cacher v1.0.1/go.mod h1:jw0de/b0K6W7Y3T9rHCMGVKUf6oG7hENNcssxYcZTCc= +github.com/celestix/gotgproto v1.0.0-beta18 h1:7884H/il+mzNreOQ4SqoMa4S5njt3UmGPKZTxPu38fU= +github.com/celestix/gotgproto v1.0.0-beta18/go.mod h1:osZOlN5irPByA0+3IPsZOH+Ibs0tOMSKmIdgGYEBRgE= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/coocood/freecache v1.2.4 h1:UdR6Yz/X1HW4fZOuH0Z94KwG851GWOSknua5VUbb/5M= +github.com/coocood/freecache v1.2.4/go.mod h1:RBUWa/Cy+OHdfTGFEhEuE1pMCMX51Ncizj7rthiQ3vk= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/glebarez/go-sqlite v1.21.2 h1:3a6LFC4sKahUunAmynQKLZceZCOzUthkRkEAl9gAXWo= +github.com/glebarez/go-sqlite v1.21.2/go.mod h1:sfxdZyhQjTM2Wry3gVYWaW072Ri1WMdWJi0k6+3382k= +github.com/glebarez/sqlite v1.10.0 h1:u4gt8y7OND/cCei/NMHmfbLxF6xP2wgKcT/BJf2pYkc= +github.com/glebarez/sqlite v1.10.0/go.mod h1:IJ+lfSOmiekhQsFTJRx/lHtGYmCdtAiTaf5wI9u5uHA= +github.com/go-faster/errors v0.7.1 h1:MkJTnDoEdi9pDabt1dpWf7AA8/BaSYZqibYyhZ20AYg= +github.com/go-faster/errors v0.7.1/go.mod h1:5ySTjWFiphBs07IKuiL69nxdfd5+fzh1u7FPGZP2quo= +github.com/go-faster/jx v1.1.0 h1:ZsW3wD+snOdmTDy9eIVgQdjUpXRRV4rqW8NS3t+20bg= +github.com/go-faster/jx v1.1.0/go.mod 
h1:vKDNikrKoyUmpzaJ0OkIkRQClNHFX/nF3dnTJZb3skg= +github.com/go-faster/xor v0.3.0/go.mod h1:x5CaDY9UKErKzqfRfFZdfu+OSTfoZny3w5Ak7UxcipQ= +github.com/go-faster/xor v1.0.0 h1:2o8vTOgErSGHP3/7XwA5ib1FTtUsNtwCoLLBjl31X38= +github.com/go-faster/xor v1.0.0/go.mod h1:x5CaDY9UKErKzqfRfFZdfu+OSTfoZny3w5Ak7UxcipQ= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ= +github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= -github.com/zelenin/go-tdlib v0.7.1 h1:szKkr3G7yG9kiw2IXt0lFyZ4JgBt82SbOmUqsVqJMqA= -github.com/zelenin/go-tdlib v0.7.1/go.mod h1:yqNbNZenZtXPKgf9hDuyZbsRz7qlxOxdfKOc+sAxxIE= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +github.com/gotd/ige v0.2.2 h1:XQ9dJZwBfDnOGSTxKXBGP4gMud3Qku2ekScRjDWWfEk= +github.com/gotd/ige v0.2.2/go.mod h1:tuCRb+Y5Y3eNTo3ypIfNpQ4MFjrnONiL2jN2AKZXmb0= +github.com/gotd/neo v0.1.5 h1:oj0iQfMbGClP8xI59x7fE/uHoTJD7NZH9oV1WNuPukQ= +github.com/gotd/neo v0.1.5/go.mod h1:9A2a4bn9zL6FADufBdt7tZt+WMhvZoc5gWXihOPoiBQ= +github.com/gotd/td v0.106.0 h1:mtTEK3UEVwgtVj4pfqspg7TN3I3pmuC2W9vnpipA3Bc= +github.com/gotd/td v0.106.0/go.mod h1:rHtaG0hd4EY0ice4f9CVH/JxsA7ZICqkcH3aFSVZplg= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= +github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= +github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= +github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= +github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod 
h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= +github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= +github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod 
h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde h1:9DShaph9qhkIYw7QF91I/ynrr4cOO2PZra2PFD7Mfeg= +gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= +modernc.org/libc v1.22.5 h1:91BNch/e5B0uPbJFgqbxXuOnxBQjlS//icfQEGmvyjE= +modernc.org/libc v1.22.5/go.mod h1:jj+Z7dTNX8fBScMVNRAYZ/jF91K8fdT2hYMThc3YjBY= +modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.5.0 h1:N+/8c5rE6EqugZwHii4IFsaJ7MUhoWX07J5tC/iI5Ds= +modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/sqlite v1.23.1 h1:nrSBg4aRQQwq59JpvGEQ15tNxoO5pX/kUjcRNwSAGQM= +modernc.org/sqlite v1.23.1/go.mod h1:OrDj17Mggn6MhE+iPbBNf7RGKODDE9NFT0f3EwDzJqk= +nhooyr.io/websocket v1.8.11 h1:f/qXNc2/3DpoSZkHt1DQu6rj4zGC8JmkkLkWss0MgN0= +nhooyr.io/websocket v1.8.11/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= +rsc.io/qr v0.2.0 h1:6vBLea5/NRMVTz8V66gipeLycZMl/+UlFmk8DvqQ6WY= +rsc.io/qr v0.2.0/go.mod h1:IF+uZjkb9fqyeF/4tlBoynqmQxUoPfWEKh921coOuXs= diff --git a/internal/bot/telegram_bot.go b/internal/bot/telegram_bot.go new file mode 100644 index 0000000..6678d01 --- /dev/null +++ b/internal/bot/telegram_bot.go @@ -0,0 +1,575 @@ +package bot + +import ( + "database/sql" + "encoding/json" + "fmt" + "html/template" + "io" + "log" + "net/http" + "os" + "strconv" + "strings" + "webBridgeBot/internal/data" + "webBridgeBot/internal/reader" + + "github.com/celestix/gotgproto" + "github.com/celestix/gotgproto/dispatcher" + "github.com/celestix/gotgproto/dispatcher/handlers" + "github.com/celestix/gotgproto/dispatcher/handlers/filters" + "github.com/celestix/gotgproto/ext" + "github.com/celestix/gotgproto/sessionMaker" + "github.com/celestix/gotgproto/storage" + gtypes "github.com/celestix/gotgproto/types" + "github.com/glebarez/sqlite" + "github.com/gorilla/mux" + "github.com/gorilla/websocket" + "github.com/gotd/td/tg" + "webBridgeBot/internal/config" + "webBridgeBot/internal/types" + "webBridgeBot/internal/utils" +) + +const ( + callbackResendToPlayer = "cb_ResendToPlayer" + tmplPath = "templates/player.html" +) + +// TelegramBot represents the main bot structure. +type TelegramBot struct { + config *config.Configuration + tgClient *gotgproto.Client + tgCtx *ext.Context + logger *log.Logger + userRepository *data.UserRepository + db *sql.DB +} + +var ( + wsClients = make(map[int64]*websocket.Conn) + + upgrader = websocket.Upgrader{ + CheckOrigin: func(r *http.Request) bool { + return true + }, + } +) + +// NewTelegramBot creates a new instance of TelegramBot. 
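+// It builds a single SQLite DSN from config.DatabasePath and reuses it for both the
+// gotgproto bot session store and the user repository's database connection; the
+// users table is ensured on startup via UserRepository.InitDB (CREATE TABLE IF NOT EXISTS).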
+func NewTelegramBot(config *config.Configuration) (*TelegramBot, error) { + dsn := fmt.Sprintf("file:%s?mode=rwc", config.DatabasePath) + tgClient, err := gotgproto.NewClient( + config.ApiID, + config.ApiHash, + gotgproto.ClientTypeBot(config.BotToken), + &gotgproto.ClientOpts{ + InMemory: true, + Session: sessionMaker.SqlSession(sqlite.Open(dsn)), + }) + if err != nil { + return nil, fmt.Errorf("failed to initialize Telegram client: %w", err) + } + + logger := log.New(os.Stdout, "TelegramBot: ", log.Ldate|log.Ltime|log.Lshortfile) + + // Initialize the database connection + db, err := sql.Open("sqlite", dsn) + if err != nil { + return nil, fmt.Errorf("failed to open SQLite database: %w", err) + } + + // Create a new UserRepository + userRepository := data.NewUserRepository(db) + + // Initialize the database schema + if err := userRepository.InitDB(); err != nil { + return nil, err + } + + return &TelegramBot{ + config: config, + tgClient: tgClient, + tgCtx: tgClient.CreateContext(), + logger: logger, + userRepository: userRepository, + db: db, + }, nil +} + +// Run starts the Telegram bot and web server. +func (b *TelegramBot) Run() { + b.logger.Printf("Starting Telegram bot (@%s)...\n", b.tgClient.Self.Username) + + b.registerHandlers() + + go b.startWebServer() + + if err := b.tgClient.Idle(); err != nil { + b.logger.Fatalf("Failed to start Telegram client: %s", err) + } +} + +func (b *TelegramBot) registerHandlers() { + clientDispatcher := b.tgClient.Dispatcher + clientDispatcher.AddHandler(handlers.NewCommand("start", b.handleStartCommand)) + clientDispatcher.AddHandler(handlers.NewCommand("authorize", b.handleAuthorizeUser)) + clientDispatcher.AddHandler(handlers.NewCallbackQuery(filters.CallbackQuery.Prefix("cb_"), b.handleCallbackQuery)) + clientDispatcher.AddHandler(handlers.NewAnyUpdate(b.handleAnyUpdate)) + clientDispatcher.AddHandler(handlers.NewMessage(filters.Message.Video, b.handleVideoMessages)) +} + +func (b *TelegramBot) handleStartCommand(ctx *ext.Context, u *ext.Update) error { + chatID := u.EffectiveChat().GetID() + user := u.EffectiveUser() + + b.logger.Printf("Processing /start command from user: %s (ID: %d) in chat: %d\n", user.FirstName, user.ID, chatID) + + // Check if the user already exists in the database + existingUser, err := b.userRepository.GetUserInfo(user.ID) + if err != nil { + b.logger.Printf("Failed to retrieve user info: %v", err) + } + + // Check if the user is the first user in the database + isFirstUser, err := b.userRepository.IsFirstUser() + if err != nil { + b.logger.Printf("Failed to check if user is first: %v", err) + } + + isAdmin := false + isAuthorized := false + + // If the user doesn't exist or is the first user, store user info or update their record + if existingUser == nil || isFirstUser { + if isFirstUser { + isAuthorized = true + isAdmin = true + b.logger.Printf("User %d is the first user and has been automatically granted admin rights.", user.ID) + } + + err = b.userRepository.StoreUserInfo(user.ID, chatID, user.FirstName, user.LastName, user.Username, isAuthorized, isAdmin) + if err != nil { + b.logger.Printf("Failed to store user info: %v", err) + } + + // Notify admins if the user is not an admin + if !isAdmin { + go b.notifyAdminsAboutNewUser(user) + } + } + + webURL := fmt.Sprintf("%s/%d", b.config.BaseURL, chatID) + + msg := fmt.Sprintf( + "Hello %s, I am @%s, your bridge between Telegram and the Web!\n"+ + "You can forward media to this bot, and I will play it on your web player instantly.\n"+ + "Click on 'Open Web URL' 
below or access your player here: %s", + user.FirstName, ctx.Self.Username, webURL, + ) + + return b.sendMediaURLReply(ctx, u, msg, webURL) +} + +// notifyAdminsAboutNewUser sends a notification to all admins about the new user. +func (b *TelegramBot) notifyAdminsAboutNewUser(newUser *tg.User) { + admins, err := b.userRepository.GetAllAdmins() + if err != nil { + b.logger.Printf("Failed to retrieve admin list: %v", err) + return + } + + notificationMsg := fmt.Sprintf("A new user has joined: %s %s (ID: %d)", newUser.FirstName, newUser.LastName, newUser.ID) + + for _, admin := range admins { + b.logger.Printf("Notifying admin %d about new user %d", admin.UserID, newUser.ID) + _, err := b.tgCtx.SendMessage(admin.ChatID, &tg.MessagesSendMessageRequest{Message: notificationMsg}) + if err != nil { + b.logger.Printf("Failed to notify admin %d: %v", admin.UserID, err) + } + } +} + +func (b *TelegramBot) handleAuthorizeUser(ctx *ext.Context, u *ext.Update) error { + // Only allow admins to run this command + adminID := u.EffectiveUser().ID + userInfo, err := b.userRepository.GetUserInfo(adminID) + if err != nil { + b.logger.Printf("Failed to retrieve user info for admin check: %v", err) + return b.sendReply(ctx, u, "Failed to authorize the user.") + } + + if !userInfo.IsAdmin { + return b.sendReply(ctx, u, "You are not authorized to perform this action.") + } + + // Parse the user ID and optional admin flag from the command + args := strings.Fields(u.EffectiveMessage.Text) + if len(args) < 2 { + return b.sendReply(ctx, u, "Usage: /authorize [admin]") + } + targetUserID, err := strconv.ParseInt(args[1], 10, 64) + if err != nil { + return b.sendReply(ctx, u, "Invalid user ID.") + } + + isAdmin := len(args) > 2 && args[2] == "admin" + + // Authorize the user and optionally promote to admin + err = b.userRepository.AuthorizeUser(targetUserID, isAdmin) + if err != nil { + b.logger.Printf("Failed to authorize user %d: %v", targetUserID, err) + return b.sendReply(ctx, u, "Failed to authorize the user.") + } + + adminMsg := "" + if isAdmin { + adminMsg = " as an admin" + } + return b.sendReply(ctx, u, fmt.Sprintf("User %d has been authorized%s.", targetUserID, adminMsg)) +} + +func (b *TelegramBot) handleAnyUpdate(ctx *ext.Context, u *ext.Update) error { + return nil +} + +func (b *TelegramBot) handleVideoMessages(ctx *ext.Context, u *ext.Update) error { + chatID := u.EffectiveChat().GetID() + b.logger.Printf("Processing video message for chat ID: %d", chatID) + + if !b.isUserChat(ctx, chatID) { + return dispatcher.EndGroups + } + + if supported, err := isSupportedMedia(u.EffectiveMessage); !supported || err != nil { + b.logger.Printf("Unsupported media type received in chat ID %d", chatID) + return dispatcher.EndGroups + } + + file, err := utils.FileFromMedia(u.EffectiveMessage.Message.Media) + if err != nil { + b.logger.Printf("Error extracting media file for chat ID %d, message ID %d: %v", u.EffectiveChat().GetID(), u.EffectiveMessage.Message.ID, err) + return err + } + + fileURL := b.generateFileURL(u.EffectiveMessage.Message.ID, file) + b.logger.Printf("Generated media file URL for message ID %d in chat ID %d: %s", u.EffectiveMessage.Message.ID, chatID, fileURL) + + return b.sendMediaToUser(ctx, u, fileURL, file) +} + +func (b *TelegramBot) isUserChat(ctx *ext.Context, chatID int64) bool { + peerChatID := ctx.PeerStorage.GetPeerById(chatID) + if peerChatID.Type != int(storage.TypeUser) { + b.logger.Printf("Chat ID %d is not a user type. 
Terminating processing.", chatID) + return false + } + return true +} + +func (b *TelegramBot) sendReply(ctx *ext.Context, u *ext.Update, msg string) error { + _, err := ctx.Reply(u, msg, &ext.ReplyOpts{}) + if err != nil { + b.logger.Printf("Failed to send reply to user: %s (ID: %d) - Error: %v", u.EffectiveUser().FirstName, u.EffectiveUser().ID, err) + } + return err +} + +func (b *TelegramBot) sendMediaURLReply(ctx *ext.Context, u *ext.Update, msg, webURL string) error { + _, err := ctx.Reply(u, msg, &ext.ReplyOpts{ + Markup: &tg.ReplyInlineMarkup{ + Rows: []tg.KeyboardButtonRow{ + { + Buttons: []tg.KeyboardButtonClass{ + &tg.KeyboardButtonURL{Text: "Open Web URL", URL: webURL}, + &tg.KeyboardButtonURL{Text: "WebBridgeBot on GitHub", URL: "https://github.com/mshafiee/webbridgebot"}, + }, + }, + }, + }, + }) + if err != nil { + b.logger.Printf("Failed to send reply to user: %s (ID: %d) - Error: %v", u.EffectiveUser().FirstName, u.EffectiveUser().ID, err) + } + return err +} + +func (b *TelegramBot) sendMediaToUser(ctx *ext.Context, u *ext.Update, fileURL string, file *types.DocumentFile) error { + _, err := ctx.Reply(u, fileURL, &ext.ReplyOpts{ + Markup: &tg.ReplyInlineMarkup{ + Rows: []tg.KeyboardButtonRow{ + { + Buttons: []tg.KeyboardButtonClass{ + &tg.KeyboardButtonCallback{ + Text: "Resend to Player", + Data: []byte(fmt.Sprintf("%s,%d", callbackResendToPlayer, u.EffectiveMessage.Message.ID)), + }, + &tg.KeyboardButtonURL{Text: "Stream URL", URL: fileURL}, + }, + }, + }, + }, + }) + if err != nil { + b.logger.Printf("Error sending reply for chat ID %d, message ID %d: %v", u.EffectiveChat().GetID(), u.EffectiveMessage.Message.ID, err) + return err + } + + wsMsg := b.constructWebSocketMessage(fileURL, file) + b.publishToWebSocket(u.EffectiveChat().GetID(), wsMsg) + return nil +} + +func (b *TelegramBot) constructWebSocketMessage(fileURL string, file *types.DocumentFile) map[string]string { + return map[string]string{ + "url": fileURL, + "fileName": file.FileName, + "fileId": strconv.Itoa(int(file.ID)), + "mimeType": file.MimeType, + "duration": strconv.Itoa(int(file.VideoAttr.Duration)), + "width": strconv.Itoa(file.VideoAttr.W), + "height": strconv.Itoa(file.VideoAttr.H), + } +} + +func (b *TelegramBot) generateFileURL(messageID int, file *types.DocumentFile) string { + hash := utils.GetShortHash(utils.PackFile( + file.FileName, + file.FileSize, + file.MimeType, + file.ID, + ), b.config.HashLength) + return fmt.Sprintf("%s/%d/%s", b.config.BaseURL, messageID, hash) +} + +func (b *TelegramBot) publishToWebSocket(chatID int64, message map[string]string) { + if client, ok := wsClients[chatID]; ok { + messageJSON, err := json.Marshal(message) + if err != nil { + log.Println("Error marshalling message:", err) + return + } + if err := client.WriteMessage(websocket.TextMessage, messageJSON); err != nil { + log.Println("Error sending WebSocket message:", err) + delete(wsClients, chatID) + client.Close() + } + } +} + +func (b *TelegramBot) handleCallbackQuery(ctx *ext.Context, u *ext.Update) error { + dataParts := strings.Split(string(u.CallbackQuery.Data), ",") + if len(dataParts) > 0 && dataParts[0] == callbackResendToPlayer && len(dataParts) > 1 { + messageID, err := strconv.Atoi(dataParts[1]) + if err != nil { + return err + } + + file, err := utils.FileFromMessage(ctx, b.tgClient, messageID) + if err != nil { + b.logger.Printf("Error fetching file for message ID %d: %v", messageID, err) + } + + wsMsg := b.constructWebSocketMessage(b.generateFileURL(messageID, file), file) + 
b.publishToWebSocket(u.EffectiveChat().GetID(), wsMsg) + + _, _ = ctx.AnswerCallback(&tg.MessagesSetBotCallbackAnswerRequest{ + Alert: true, + QueryID: u.CallbackQuery.QueryID, + Message: fmt.Sprintf("The %s file has been sent to the web player.", file.FileName), + }) + } + return nil +} + +func isSupportedMedia(m *gtypes.Message) (bool, error) { + if m.Media == nil { + return false, dispatcher.EndGroups + } + switch m.Media.(type) { + case *tg.MessageMediaDocument: + return true, nil + default: + return false, nil + } +} + +func (b *TelegramBot) startWebServer() { + router := mux.NewRouter() + + router.HandleFunc("/ws/{chatID}", b.handleWebSocket) + router.HandleFunc("/{messageID}/{hash}", b.handleStream) + router.HandleFunc("/{chatID}", b.handlePlayer) + router.HandleFunc("/{chatID}/", b.handlePlayer) + + log.Printf("Web server started on port %s", b.config.Port) + if err := http.ListenAndServe(fmt.Sprintf(":%s", b.config.Port), router); err != nil { + log.Panic(err) + } +} + +// handleWebSocket manages WebSocket connections. +func (b *TelegramBot) handleWebSocket(w http.ResponseWriter, r *http.Request) { + chatID, err := b.parseChatID(mux.Vars(r)) + if err != nil { + http.Error(w, "Invalid chat ID", http.StatusBadRequest) + return + } + + ws, err := upgrader.Upgrade(w, r, nil) + if err != nil { + log.Println(err) + return + } + defer ws.Close() + + // Register the WebSocket client. + wsClients[chatID] = ws + + for { + // Keep the connection alive or handle control messages. + messageType, p, err := ws.ReadMessage() + if err != nil { + log.Println(err) + delete(wsClients, chatID) + break + } + // Echo the message back (optional, for keeping the connection alive). + if err := ws.WriteMessage(messageType, p); err != nil { + log.Println(err) + break + } + } +} + +// handleStream handles the file streaming from Telegram. +func (b *TelegramBot) handleStream(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + vars := mux.Vars(r) + messageIDStr := vars["messageID"] + authHash := vars["hash"] + + b.logger.Printf("Received request to stream file with message ID: %s from client %s", messageIDStr, r.RemoteAddr) + + // Parse and validate message ID. + messageID, err := strconv.Atoi(messageIDStr) + if err != nil { + b.logger.Printf("Invalid message ID '%s' received from client %s", messageIDStr, r.RemoteAddr) + http.Error(w, "Invalid message ID format", http.StatusBadRequest) + return + } + + // Fetch the file from Telegram. + file, err := utils.FileFromMessage(ctx, b.tgClient, messageID) + if err != nil { + b.logger.Printf("Error fetching file for message ID %d: %v", messageID, err) + http.Error(w, "Unable to retrieve file for the specified message", http.StatusBadRequest) + return + } + + expectedHash := utils.PackFile(file.FileName, file.FileSize, file.MimeType, file.ID) + if !utils.CheckHash(authHash, expectedHash, b.config.HashLength) { + b.logger.Printf("Hash verification failed for message ID %d from client %s", messageID, r.RemoteAddr) + http.Error(w, "Invalid authentication hash", http.StatusBadRequest) + return + } + + contentLength := file.FileSize + + // Default range values for full content. + var start, end int64 = 0, contentLength - 1 + + // Process range header if present. 
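+	// For example (illustrative values only), "Range: bytes=0-1048575" sets both bounds,
+	// while "Range: bytes=1048576-" leaves the end open; a bound left empty keeps the
+	// defaults set above (0 and contentLength-1). Only a single "bytes=start-end" range is handled.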
+ rangeHeader := r.Header.Get("Range") + if rangeHeader != "" { + b.logger.Printf("Range header received for message ID %d: %s", messageID, rangeHeader) + if strings.HasPrefix(rangeHeader, "bytes=") { + ranges := strings.Split(rangeHeader[len("bytes="):], "-") + if len(ranges) == 2 { + if ranges[0] != "" { + start, err = strconv.ParseInt(ranges[0], 10, 64) + if err != nil { + b.logger.Printf("Invalid start range value for message ID %d: %v", messageID, err) + http.Error(w, "Invalid range start value", http.StatusBadRequest) + return + } + } + if ranges[1] != "" { + end, err = strconv.ParseInt(ranges[1], 10, 64) + if err != nil { + b.logger.Printf("Invalid end range value for message ID %d: %v", messageID, err) + http.Error(w, "Invalid range end value", http.StatusBadRequest) + return + } + } + } + } + } + + // Validate the requested range. + if start > end || start < 0 || end >= contentLength { + b.logger.Printf("Requested range not satisfiable for message ID %d: start=%d, end=%d, contentLength=%d", messageID, start, end, contentLength) + http.Error(w, "Requested range not satisfiable", http.StatusRequestedRangeNotSatisfiable) + return + } + + // Create a TelegramReader to stream the content. + lr, err := reader.NewTelegramReader(ctx, b.tgClient, file.Location, start, end, contentLength, b.config.BinaryCache) + if err != nil { + b.logger.Printf("Error creating Telegram reader for message ID %d: %v", messageID, err) + http.Error(w, "Failed to initialize file stream", http.StatusInternalServerError) + return + } + defer lr.Close() + + // Send appropriate headers and stream the content. + if rangeHeader != "" { + b.logger.Printf("Serving partial content for message ID %d: bytes %d-%d of %d", messageID, start, end, contentLength) + w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, contentLength)) + w.Header().Set("Content-Length", strconv.FormatInt(end-start+1, 10)) + w.Header().Set("Content-Type", "application/octet-stream") + w.WriteHeader(http.StatusPartialContent) + } else { + b.logger.Printf("Serving full content for message ID %d", messageID) + w.Header().Set("Content-Length", strconv.FormatInt(contentLength, 10)) + w.Header().Set("Content-Type", "application/octet-stream") + w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, file.FileName)) + } + + // Stream the content to the client. 
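+	// io.Copy streams from the TelegramReader straight into the ResponseWriter, so the
+	// file is never buffered in full; by this point the status and headers have already
+	// been written, so a mid-stream failure can only be logged, not change the response status.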
+ if _, err := io.Copy(w, lr); err != nil { + b.logger.Printf("Error streaming content for message ID %d: %v", messageID, err) + http.Error(w, "Error streaming content", http.StatusInternalServerError) + } +} + +func (b *TelegramBot) parseChatID(vars map[string]string) (int64, error) { + chatIDStr, ok := vars["chatID"] + if !ok { + return 0, fmt.Errorf("Chat ID is required") + } + + return strconv.ParseInt(chatIDStr, 10, 64) +} + +func (b *TelegramBot) handlePlayer(w http.ResponseWriter, r *http.Request) { + log.Printf("Received request for player: %s", r.URL.Path) + + chatID, err := b.parseChatID(mux.Vars(r)) + if err != nil { + http.Error(w, "Invalid chat ID", http.StatusBadRequest) + return + } + + t, err := template.ParseFiles(tmplPath) + if err != nil { + b.logger.Printf("Error loading template: %v", err) + http.Error(w, "Failed to load template", http.StatusInternalServerError) + return + } + + if err := t.Execute(w, map[string]interface{}{"ChatID": chatID}); err != nil { + b.logger.Printf("Error rendering template: %v", err) + http.Error(w, "Failed to render template", http.StatusInternalServerError) + } +} diff --git a/internal/cache/cache.go b/internal/cache/cache.go new file mode 100644 index 0000000..5f57532 --- /dev/null +++ b/internal/cache/cache.go @@ -0,0 +1,63 @@ +package cache + +import ( + "bytes" + "encoding/gob" + "sync" + "webBridgeBot/internal/types" + + "github.com/coocood/freecache" + "github.com/gotd/td/tg" +) + +var cache *Cache + +type Cache struct { + cache *freecache.Cache + mu sync.RWMutex +} + +func init() { + gob.Register(types.DocumentFile{}) + gob.Register(tg.InputDocumentFileLocation{}) + cache = &Cache{cache: freecache.NewCache(10 * 1024 * 1024)} +} + +func GetCache() *Cache { + return cache +} + +func (c *Cache) Get(key string, value *types.DocumentFile) error { + c.mu.RLock() + defer c.mu.RUnlock() + data, err := cache.cache.Get([]byte(key)) + if err != nil { + return err + } + dec := gob.NewDecoder(bytes.NewReader(data)) + err = dec.Decode(&value) + if err != nil { + return err + } + return nil +} + +func (c *Cache) Set(key string, value *types.DocumentFile, expireSeconds int) error { + c.mu.Lock() + defer c.mu.Unlock() + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + err := enc.Encode(value) + if err != nil { + return err + } + cache.cache.Set([]byte(key), buf.Bytes(), expireSeconds) + return nil +} + +func (c *Cache) Delete(key string) error { + c.mu.Lock() + defer c.mu.Unlock() + cache.cache.Del([]byte(key)) + return nil +} diff --git a/internal/config/config.go b/internal/config/config.go new file mode 100644 index 0000000..b1be912 --- /dev/null +++ b/internal/config/config.go @@ -0,0 +1,110 @@ +package config + +import ( + "fmt" + "log" + "time" + + "github.com/spf13/viper" + "webBridgeBot/internal/reader" +) + +const ( + DefaultChunkSize int64 = 1024 * 1024 // 1 MB +) + +type Configuration struct { + ApiID int + ApiHash string + BotToken string + BaseURL string + Port string + HashLength int + BinaryCache *reader.BinaryCache + CacheDirectory string + MaxCacheSize int64 + DatabasePath string + Timeout time.Duration + DebugMode bool +} + +// initializeViper sets up viper with environment variable overrides +func initializeViper() { + viper.SetConfigFile(".env") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + log.Printf("Error reading config file: %v", err) + } +} + +// validateMandatoryFields checks for mandatory fields and terminates if any are missing +func validateMandatoryFields(config Configuration) { + if 
config.ApiID == 0 { + log.Fatal("API_ID is required and not set") + } + if config.ApiHash == "" { + log.Fatal("API_HASH is required and not set") + } + if config.BotToken == "" { + log.Fatal("BOT_TOKEN is required and not set") + } + if config.BaseURL == "" { + log.Fatal("BASE_URL is required and not set") + } +} + +// setDefaultValues sets default values for optional configuration fields +func setDefaultValues(config *Configuration) { + if config.HashLength < 6 { + config.HashLength = 8 + } + if config.CacheDirectory == "" { + config.CacheDirectory = ".cache" + } + if config.MaxCacheSize == 0 { + config.MaxCacheSize = 10 * 1024 * 1024 * 1024 // 10 GB default + } + if config.DatabasePath == "" { + config.DatabasePath = fmt.Sprintf("%s/webBridgeBot.db", config.CacheDirectory) + } + if config.Timeout == 0 { + config.Timeout = 30 * time.Second + } +} + +func LoadConfig() Configuration { + initializeViper() + + config := Configuration{ + ApiID: viper.GetInt("API_ID"), + ApiHash: viper.GetString("API_HASH"), + BotToken: viper.GetString("BOT_TOKEN"), + BaseURL: viper.GetString("BASE_URL"), + Port: viper.GetString("PORT"), + HashLength: viper.GetInt("HASH_LENGTH"), + CacheDirectory: viper.GetString("CACHE_DIRECTORY"), + MaxCacheSize: viper.GetInt64("MAX_CACHE_SIZE"), + Timeout: viper.GetDuration("TIMEOUT"), + DebugMode: viper.GetBool("DEBUG_MODE"), + } + + validateMandatoryFields(config) + setDefaultValues(&config) + + var err error + config.BinaryCache, err = reader.NewBinaryCache( + config.CacheDirectory, + config.MaxCacheSize, + DefaultChunkSize, + ) + if err != nil { + log.Fatalf("Error initializing BinaryCache: %v", err) + } + + if config.DebugMode { + log.Printf("Loaded configuration: %+v", config) + } + + return config +} diff --git a/internal/data/user.go b/internal/data/user.go new file mode 100644 index 0000000..5194ca2 --- /dev/null +++ b/internal/data/user.go @@ -0,0 +1,117 @@ +package data + +import ( + "database/sql" + "fmt" +) + +type User struct { + UserID int64 + ChatID int64 + FirstName string + LastName string + Username string + IsAuthorized bool + IsAdmin bool + CreatedAt string +} + +type UserRepository struct { + db *sql.DB +} + +// NewUserRepository creates a new instance of UserRepository. +func NewUserRepository(db *sql.DB) *UserRepository { + return &UserRepository{db: db} +} + +// InitDB initializes the database by creating necessary tables. +func (r *UserRepository) InitDB() error { + query := ` + CREATE TABLE IF NOT EXISTS users ( + user_id INTEGER PRIMARY KEY, + chat_id INTEGER NOT NULL, + first_name TEXT, + last_name TEXT, + username TEXT, + is_authorized BOOLEAN DEFAULT FALSE, + is_admin BOOLEAN DEFAULT FALSE, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP + );` + + _, err := r.db.Exec(query) + if err != nil { + return fmt.Errorf("failed to create users table: %w", err) + } + + return nil +} + +// StoreUserInfo stores or updates user information in the database. +func (r *UserRepository) StoreUserInfo(userID, chatID int64, firstName, lastName, username string, isAuthorized, isAdmin bool) error { + query := ` + INSERT INTO users (user_id, chat_id, first_name, last_name, username, is_authorized, is_admin) + VALUES (?, ?, ?, ?, ?, ?, ?) 
+ ON CONFLICT(user_id) DO UPDATE SET + chat_id=excluded.chat_id, + first_name=excluded.first_name, + last_name=excluded.last_name, + username=excluded.username, + is_authorized=excluded.is_authorized, + is_admin=excluded.is_admin; + ` + + _, err := r.db.Exec(query, userID, chatID, firstName, lastName, username, isAuthorized, isAdmin) + return err +} + +// GetUserInfo retrieves user information from the database by user ID. +func (r *UserRepository) GetUserInfo(userID int64) (*User, error) { + query := `SELECT user_id, chat_id, first_name, last_name, username, is_authorized, is_admin, created_at FROM users WHERE user_id = ?` + row := r.db.QueryRow(query, userID) + + var user User + if err := row.Scan(&user.UserID, &user.ChatID, &user.FirstName, &user.LastName, &user.Username, &user.IsAuthorized, &user.IsAdmin, &user.CreatedAt); err != nil { + return nil, err + } + + return &user, nil +} + +// IsFirstUser checks if the current user is the first user in the database. +func (r *UserRepository) IsFirstUser() (bool, error) { + query := `SELECT COUNT(*) FROM users` + var count int + err := r.db.QueryRow(query).Scan(&count) + if err != nil { + return false, err + } + return count == 0, nil +} + +// AuthorizeUser sets the is_authorized and optionally is_admin flags for a given user. +func (r *UserRepository) AuthorizeUser(userID int64, isAdmin bool) error { + query := `UPDATE users SET is_authorized = TRUE, is_admin = ? WHERE user_id = ?` + _, err := r.db.Exec(query, isAdmin, userID) + return err +} + +// GetAllAdmins retrieves a list of all admin users. +func (r *UserRepository) GetAllAdmins() ([]User, error) { + query := `SELECT user_id, chat_id, first_name, last_name, username FROM users WHERE is_admin = TRUE` + rows, err := r.db.Query(query) + if err != nil { + return nil, err + } + defer rows.Close() + + var admins []User + for rows.Next() { + var user User + if err := rows.Scan(&user.UserID, &user.ChatID, &user.FirstName, &user.LastName, &user.Username); err != nil { + return nil, err + } + admins = append(admins, user) + } + return admins, nil +} diff --git a/internal/reader/binary_cache.go b/internal/reader/binary_cache.go new file mode 100644 index 0000000..77b7bdb --- /dev/null +++ b/internal/reader/binary_cache.go @@ -0,0 +1,510 @@ +package reader + +import ( + "container/heap" + "encoding/binary" + "fmt" + "io" + "os" + "path/filepath" + "sync" + "time" +) + +type chunkMetadata struct { + LocationID int64 + ChunkIndex int64 + Offset int64 + Size int64 // Actual size of the data in this chunk, not the padded size + Timestamp int64 +} + +// Helper methods for converting the `Timestamp` to/from `time.Time` +func (meta *chunkMetadata) SetTimestamp(t time.Time) { + meta.Timestamp = t.Unix() +} + +func (meta *chunkMetadata) GetTimestamp() time.Time { + return time.Unix(meta.Timestamp, 0) +} + +type BinaryCache struct { + cashFile *os.File + metadataFile *os.File + metadata map[int64]map[int64][]chunkMetadata // Map of location ID to chunk ID to metadata + metadataLock sync.Mutex + chunkLock sync.Mutex + cacheSize int64 + maxCacheSize int64 + lruQueue *PriorityQueue + evictionList []*chunkMetadata + fixedChunkSize int64 +} + +// LRUItem represents an item in the LRU cache with its priority. +type LRUItem struct { + locationID int64 + chunkID int64 + timestamp int64 + index int // The index of the item in the heap. +} + +// PriorityQueue implements a min-heap for LRU eviction. 
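+// Less orders items by ascending timestamp, so the heap root is always the least
+// recently used chunk and heap.Pop in evictIfNeeded removes chunks in LRU order;
+// the index field is kept in sync by Swap/Push/Pop so update can reposition an item
+// with heap.Fix after its timestamp changes.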
+type PriorityQueue []*LRUItem + +func (pq PriorityQueue) Len() int { return len(pq) } + +func (pq PriorityQueue) Less(i, j int) bool { + return pq[i].timestamp < pq[j].timestamp +} + +func (pq PriorityQueue) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] + pq[i].index = i + pq[j].index = j +} + +func (pq *PriorityQueue) Push(x interface{}) { + n := len(*pq) + item := x.(*LRUItem) + item.index = n + *pq = append(*pq, item) +} + +func (pq *PriorityQueue) Pop() interface{} { + old := *pq + n := len(old) + item := old[n-1] + old[n-1] = nil // Avoid memory leak + item.index = -1 // For safety + *pq = old[0 : n-1] + return item +} + +func (pq *PriorityQueue) update(item *LRUItem, timestamp int64) { + item.timestamp = timestamp + heap.Fix(pq, item.index) +} + +// NewBinaryCache initializes a new binary cache +func NewBinaryCache(cacheDir string, maxCacheSize int64, fixedChunkSize int64) (*BinaryCache, error) { + // Create the cache directory if it doesn't exist + err := os.MkdirAll(cacheDir, 0755) + if err != nil { + return nil, err + } + + // Define the file paths for cache and metadata + cacheFilename := filepath.Join(cacheDir, "cache.dat") + metadataFilename := filepath.Join(cacheDir, "metadata.dat") + + // Open or create the cache file + file, err := os.OpenFile(cacheFilename, os.O_CREATE|os.O_RDWR, 0644) + if err != nil { + return nil, err + } + + // Open or create the metadata file + metadataFile, err := os.OpenFile(metadataFilename, os.O_CREATE|os.O_RDWR, 0644) + if err != nil { + file.Close() + return nil, err + } + + // Initialize the BinaryCache struct + bc := &BinaryCache{ + cashFile: file, + metadataFile: metadataFile, + metadata: make(map[int64]map[int64][]chunkMetadata), + maxCacheSize: maxCacheSize, + lruQueue: &PriorityQueue{}, + fixedChunkSize: fixedChunkSize, + } + + // Load metadata from the metadata file if it exists + err = bc.loadMetadata() + if err != nil { + return nil, err + } + + // Initialize the priority queue (LRU queue) + heap.Init(bc.lruQueue) + + return bc, nil +} + +// Write a chunk to the binary cashFile +func (bc *BinaryCache) writeChunk(locationID int64, chunkID int64, chunk []byte) error { + bc.chunkLock.Lock() + defer bc.chunkLock.Unlock() + + if _, exists := bc.metadata[locationID]; !exists { + bc.metadata[locationID] = make(map[int64][]chunkMetadata) + } + + // Evict if cache size exceeds max size before writing new data + bc.evictIfNeeded() + + // Split the chunk into fixed-sized chunks + chunkParts := bc.splitChunk(chunk) + + // Write each part + for i, part := range chunkParts { + err := bc.writeChunkPart(locationID, chunkID, int64(i), part) + if err != nil { + return err + } + } + + // Save the metadata to the metadata file + return bc.saveMetadata() +} + +// Helper method to split the chunk into fixed-size parts +func (bc *BinaryCache) splitChunk(chunk []byte) [][]byte { + var parts [][]byte + for len(chunk) > 0 { + partSize := bc.fixedChunkSize + if int64(len(chunk)) < bc.fixedChunkSize { + partSize = int64(len(chunk)) + } + parts = append(parts, chunk[:partSize]) + chunk = chunk[partSize:] + } + return parts +} + +// Helper method to write a part of the chunk +func (bc *BinaryCache) writeChunkPart(locationID, chunkID, partIndex int64, part []byte) error { + var offset int64 + var err error + + // Check if we can overwrite an evicted chunk + if len(bc.evictionList) > 0 { + evictedMeta := bc.evictionList[len(bc.evictionList)-1] + bc.evictionList = bc.evictionList[:len(bc.evictionList)-1] // Remove the last element + offset = evictedMeta.Offset + } 
else { + offset, err = bc.cashFile.Seek(0, os.SEEK_END) + if err != nil { + return err + } + } + + // Pad the part to the fixed chunk size if necessary + paddedPart := make([]byte, bc.fixedChunkSize) + copy(paddedPart, part) + + // Write the padded part to the file + _, err = bc.cashFile.WriteAt(paddedPart, offset) + if err != nil { + return err + } + + timestamp := time.Now().Unix() + meta := chunkMetadata{ + LocationID: locationID, + ChunkIndex: partIndex, + Offset: offset, + Size: int64(len(part)), // Store the actual size of the part, not the padded size + Timestamp: timestamp, // Store the current timestamp as int64 + } + + // Update the metadata + bc.metadata[locationID][chunkID] = append(bc.metadata[locationID][chunkID], meta) + bc.cacheSize += bc.fixedChunkSize + + // Add to LRU queue + bc.addLRU(locationID, chunkID, timestamp) + + return nil +} + +// Read a specific chunk from the binary cashFile +func (bc *BinaryCache) readChunk(locationID int64, chunkID int64) ([]byte, error) { + bc.chunkLock.Lock() + defer bc.chunkLock.Unlock() + + locationMetadata, exists := bc.metadata[locationID] + if !exists { + return nil, fmt.Errorf("location ID %d not found", locationID) + } + + chunkMetadata, exists := locationMetadata[chunkID] + if !exists { + return nil, fmt.Errorf("chunk %d not found for location ID %d", chunkID, locationID) + } + + // Combine all parts + var chunk []byte + for _, meta := range chunkMetadata { + part, err := bc.readChunkPart(meta) + if err != nil { + return nil, err + } + chunk = append(chunk, part...) + } + + // Update the timestamp for LRU + timestamp := time.Now().Unix() + for _, meta := range chunkMetadata { + meta.SetTimestamp(time.Now()) + } + + // Update the LRU queue + bc.updateLRU(locationID, chunkID, timestamp) + + return chunk, nil +} + +// Helper method to read a part of the chunk +func (bc *BinaryCache) readChunkPart(meta chunkMetadata) ([]byte, error) { + // Seek to the chunk's offset + _, err := bc.cashFile.Seek(meta.Offset, os.SEEK_SET) + if err != nil { + return nil, err + } + + // Read the chunk's data + paddedPart := make([]byte, bc.fixedChunkSize) + _, err = bc.cashFile.Read(paddedPart) + if err != nil { + return nil, err + } + + // Return only the actual size of the data, trimming any padding + return paddedPart[:meta.Size], nil +} + +// Add a chunk to the LRU queue +func (bc *BinaryCache) addLRU(locationID int64, chunkID int64, timestamp int64) { + item := &LRUItem{ + locationID: locationID, + chunkID: chunkID, + timestamp: timestamp, + } + heap.Push(bc.lruQueue, item) +} + +// Update a chunk's position in the LRU queue +func (bc *BinaryCache) updateLRU(locationID int64, chunkID int64, timestamp int64) { + for _, item := range *bc.lruQueue { + if item.locationID == locationID && item.chunkID == chunkID { + bc.lruQueue.update(item, timestamp) + return + } + } +} + +// Evict chunks until the cache size is within the limit +func (bc *BinaryCache) evictIfNeeded() { + for bc.cacheSize >= bc.maxCacheSize && bc.lruQueue.Len() > 0 { // Changed from '>' to '>=' + + // Evict the least recently used chunk + item := heap.Pop(bc.lruQueue).(*LRUItem) + metas := bc.metadata[item.locationID][item.chunkID] + for _, meta := range metas { + bc.evictionList = append(bc.evictionList, &meta) // Add to the list of evicted chunks + bc.cacheSize -= bc.fixedChunkSize + } + delete(bc.metadata[item.locationID], item.chunkID) + if len(bc.metadata[item.locationID]) == 0 { + delete(bc.metadata, item.locationID) + } + } +} + +// Save metadata to the metadata cashFile +func (bc 
*BinaryCache) saveMetadata() error { + bc.metadataLock.Lock() + defer bc.metadataLock.Unlock() + + _, err := bc.metadataFile.Seek(0, os.SEEK_SET) + if err != nil { + return err + } + + // Clear the metadata cashFile before saving new data + err = bc.metadataFile.Truncate(0) + if err != nil { + return err + } + + totalChunks := int64(0) + for _, locationChunks := range bc.metadata { + totalChunks += int64(len(locationChunks)) + } + + err = binary.Write(bc.metadataFile, binary.LittleEndian, totalChunks) + if err != nil { + return err + } + + for locationID, locationChunks := range bc.metadata { + for chunkID, metas := range locationChunks { + for _, meta := range metas { + err := binary.Write(bc.metadataFile, binary.LittleEndian, locationID) + if err != nil { + return err + } + err = binary.Write(bc.metadataFile, binary.LittleEndian, chunkID) + if err != nil { + return err + } + err = binary.Write(bc.metadataFile, binary.LittleEndian, meta.LocationID) + if err != nil { + return err + } + err = binary.Write(bc.metadataFile, binary.LittleEndian, meta.ChunkIndex) + if err != nil { + return err + } + err = binary.Write(bc.metadataFile, binary.LittleEndian, meta.Offset) + if err != nil { + return err + } + err = binary.Write(bc.metadataFile, binary.LittleEndian, meta.Size) + if err != nil { + return err + } + err = binary.Write(bc.metadataFile, binary.LittleEndian, meta.Timestamp) + if err != nil { + return err + } + } + } + } + + return bc.metadataFile.Sync() +} + +// Load metadata from the metadata cashFile +func (bc *BinaryCache) loadMetadata() error { + bc.metadataLock.Lock() + defer bc.metadataLock.Unlock() + + // Get the metadata cashFile size + fileInfo, err := bc.metadataFile.Stat() + if err != nil { + return err + } + fileSize := fileInfo.Size() + + // Check if the metadata cashFile is empty or corrupted + if fileSize == 0 { + return bc.initializeFile() + } + + _, err = bc.metadataFile.Seek(0, os.SEEK_SET) + if err != nil { + return err + } + + // Read number of chunks + var numChunks int64 + err = binary.Read(bc.metadataFile, binary.LittleEndian, &numChunks) + if err != nil { + return bc.initializeFile() + } + + for i := int64(0); i < numChunks; i++ { + var locationID int64 + var chunkID int64 + var meta chunkMetadata + + err = binary.Read(bc.metadataFile, binary.LittleEndian, &locationID) + if err != nil { + if err == io.EOF { + break // Gracefully handle unexpected EOF + } + return err + } + err = binary.Read(bc.metadataFile, binary.LittleEndian, &chunkID) + if err != nil { + if err == io.EOF { + break // Gracefully handle unexpected EOF + } + return err + } + err = binary.Read(bc.metadataFile, binary.LittleEndian, &meta.LocationID) + if err != nil { + if err == io.EOF { + break // Gracefully handle unexpected EOF + } + return err + } + err = binary.Read(bc.metadataFile, binary.LittleEndian, &meta.ChunkIndex) + if err != nil { + if err == io.EOF { + break // Gracefully handle unexpected EOF + } + return err + } + err = binary.Read(bc.metadataFile, binary.LittleEndian, &meta.Offset) + if err != nil { + if err == io.EOF { + break // Gracefully handle unexpected EOF + } + return err + } + err = binary.Read(bc.metadataFile, binary.LittleEndian, &meta.Size) + if err != nil { + if err == io.EOF { + break // Gracefully handle unexpected EOF + } + return err + } + err = binary.Read(bc.metadataFile, binary.LittleEndian, &meta.Timestamp) + if err != nil { + if err == io.EOF { + break // Gracefully handle unexpected EOF + } + return err + } + + if _, exists := bc.metadata[locationID]; !exists { 
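+			// Lazily allocate the per-location map the first time a chunk for this location is loaded.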
+ bc.metadata[locationID] = make(map[int64][]chunkMetadata) + } + + bc.metadata[locationID][chunkID] = append(bc.metadata[locationID][chunkID], meta) + bc.cacheSize += bc.fixedChunkSize + + // Add the chunk to the LRU queue + bc.addLRU(locationID, chunkID, meta.Timestamp) + } + + return nil +} + +// Initialize the metadata cashFile +func (bc *BinaryCache) initializeFile() error { + // Truncate the metadata cashFile to clear existing data + err := bc.metadataFile.Truncate(0) + if err != nil { + return err + } + + _, err = bc.metadataFile.Seek(0, os.SEEK_SET) + if err != nil { + return err + } + + // Initialize with zero chunks + var numChunks int64 = 0 + err = binary.Write(bc.metadataFile, binary.LittleEndian, numChunks) + if err != nil { + return err + } + + // Reset in-memory metadata + bc.metadata = make(map[int64]map[int64][]chunkMetadata) + bc.cacheSize = 0 + + // Ensure changes are written to disk + err = bc.metadataFile.Sync() + if err != nil { + return err + } + + return nil +} diff --git a/internal/reader/binary_cache_test.go b/internal/reader/binary_cache_test.go new file mode 100644 index 0000000..c941364 --- /dev/null +++ b/internal/reader/binary_cache_test.go @@ -0,0 +1,235 @@ +package reader + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "testing" +) + +func TestNewBinaryCache(t *testing.T) { + // Create a temporary directory for the test + tempDir := t.TempDir() + + // Initialize a new BinaryCache with a max cache size of 1024 bytes and a fixed chunk size of 256 bytes + cache, err := NewBinaryCache(tempDir, 1024, 256) + if err != nil { + t.Fatalf("Failed to initialize BinaryCache: %v", err) + } + + // Check if cache files exist + cacheFile := filepath.Join(tempDir, "cache.dat") + metadataFile := filepath.Join(tempDir, "metadata.dat") + + if _, err := os.Stat(cacheFile); os.IsNotExist(err) { + t.Errorf("Cache file was not created") + } + + if _, err := os.Stat(metadataFile); os.IsNotExist(err) { + t.Errorf("Metadata file was not created") + } + + // Close the cache files + cache.cashFile.Close() + cache.metadataFile.Close() +} + +func TestBinaryCache_WriteReadChunk(t *testing.T) { + // Create a temporary directory for the test + tempDir := t.TempDir() + + // Initialize a new BinaryCache + cache, err := NewBinaryCache(tempDir, 1024, 256) + if err != nil { + t.Fatalf("Failed to initialize BinaryCache: %v", err) + } + + locationID := int64(1) + chunkID := int64(1) + data := []byte("This is a test chunk of data.") + + // Write the chunk + err = cache.writeChunk(locationID, chunkID, data) + if err != nil { + t.Fatalf("Failed to write chunk: %v", err) + } + + // Read the chunk back + readData, err := cache.readChunk(locationID, chunkID) + if err != nil { + t.Fatalf("Failed to read chunk: %v", err) + } + + // Compare the written data with the read data + if !bytes.Equal(data, readData) { + t.Errorf("Data mismatch: expected %v, got %v", data, readData) + } + + // Close the cache files + cache.cashFile.Close() + cache.metadataFile.Close() +} + +func TestBinaryCache_LRU_Eviction(t *testing.T) { + // Create a temporary directory for the test + tempDir := t.TempDir() + + // Initialize a new BinaryCache with a small max cache size to force eviction + cache, err := NewBinaryCache(tempDir, 512, 256) + if err != nil { + t.Fatalf("Failed to initialize BinaryCache: %v", err) + } + + locationID := int64(1) + data1 := make([]byte, 256) // 256 bytes + data2 := make([]byte, 256) // 256 bytes + data3 := make([]byte, 256) // 256 bytes + + // Write multiple chunks to the cache + err = 
cache.writeChunk(locationID, 1, data1) + if err != nil { + t.Fatalf("Failed to write chunk 1: %v", err) + } + + fmt.Println("After writing chunk 1") + + err = cache.writeChunk(locationID, 2, data2) + if err != nil { + t.Fatalf("Failed to write chunk 2: %v", err) + } + + fmt.Println("After writing chunk 2") + + err = cache.writeChunk(locationID, 3, data3) // This should trigger eviction + if err != nil { + t.Fatalf("Failed to write chunk 3: %v", err) + } + + fmt.Println("After writing chunk 3 and before checking eviction") + + // Check that chunk 1 was evicted (since cache size is limited) + _, err = cache.readChunk(locationID, 1) + if err == nil { + t.Error("Expected chunk 1 to be evicted, but it was not") + } else { + fmt.Printf("Chunk 1 was successfully evicted, received error: %v\n", err) + } + + // Check that chunk 2 and chunk 3 are still present + _, err = cache.readChunk(locationID, 2) + if err != nil { + t.Errorf("Chunk 2 should still be present, but got error: %v", err) + } + + _, err = cache.readChunk(locationID, 3) + if err != nil { + t.Errorf("Chunk 3 should still be present, but got error: %v", err) + } + + // Close the cache files + cache.cashFile.Close() + cache.metadataFile.Close() +} + +func TestBinaryCache_MetadataPersistence(t *testing.T) { + // Create a temporary directory for the test + tempDir := t.TempDir() + + // Initialize a new BinaryCache + cache, err := NewBinaryCache(tempDir, 1024, 256) + if err != nil { + t.Fatalf("Failed to initialize BinaryCache: %v", err) + } + + locationID := int64(1) + chunkID := int64(1) + data := []byte("Persistent chunk data.") + + // Write the chunk + err = cache.writeChunk(locationID, chunkID, data) + if err != nil { + t.Fatalf("Failed to write chunk: %v", err) + } + + // Close and re-open the cache to simulate a restart + cache.cashFile.Close() + cache.metadataFile.Close() + + cache, err = NewBinaryCache(tempDir, 1024, 256) + if err != nil { + t.Fatalf("Failed to reinitialize BinaryCache: %v", err) + } + + // Read the chunk back + readData, err := cache.readChunk(locationID, chunkID) + if err != nil { + t.Fatalf("Failed to read chunk after reopening cache: %v", err) + } + + // Compare the written data with the read data + if !bytes.Equal(data, readData) { + t.Errorf("Data mismatch after reopening cache: expected %v, got %v", data, readData) + } + + // Close the cache files + cache.cashFile.Close() + cache.metadataFile.Close() +} + +func TestSplitChunk(t *testing.T) { + // Initialize a BinaryCache with a fixed chunk size + cache := &BinaryCache{ + fixedChunkSize: 256, // 256 bytes per chunk part + } + + t.Run("Basic Split", func(t *testing.T) { + chunk := make([]byte, 512) // 512 bytes, should be split into two parts + parts := cache.splitChunk(chunk) + + if len(parts) != 2 { + t.Errorf("Expected 2 parts, got %d", len(parts)) + } + + for i, part := range parts { + if len(part) != 256 { + t.Errorf("Part %d expected to have length 256, got %d", i, len(part)) + } + } + }) + + t.Run("Exact Size Split", func(t *testing.T) { + chunk := make([]byte, 256) // 256 bytes, should return one part + parts := cache.splitChunk(chunk) + + if len(parts) != 1 { + t.Errorf("Expected 1 part, got %d", len(parts)) + } + + if len(parts[0]) != 256 { + t.Errorf("Expected part to have length 256, got %d", len(parts[0])) + } + }) + + t.Run("Smaller Chunk", func(t *testing.T) { + chunk := make([]byte, 100) // 100 bytes, should return one part + parts := cache.splitChunk(chunk) + + if len(parts) != 1 { + t.Errorf("Expected 1 part, got %d", len(parts)) + } + + if 
len(parts[0]) != 100 { + t.Errorf("Expected part to have length 100, got %d", len(parts[0])) + } + }) + + t.Run("Empty Chunk", func(t *testing.T) { + chunk := make([]byte, 0) // Empty chunk, should return no parts + parts := cache.splitChunk(chunk) + + if len(parts) != 0 { + t.Errorf("Expected 0 parts, got %d", len(parts)) + } + }) +} diff --git a/internal/reader/reader.go b/internal/reader/reader.go new file mode 100644 index 0000000..8ae8881 --- /dev/null +++ b/internal/reader/reader.go @@ -0,0 +1,262 @@ +package reader + +import ( + "context" + "errors" + "fmt" + "io" + "log" + "net" + "os" + "regexp" + "strconv" + "sync" + "syscall" + "time" + + "github.com/celestix/gotgproto" + "github.com/gotd/td/tg" +) + +const ( + chunkSize = int64(1024 * 1024) + maxRequestsPerSecond = 30 // Max number of requests per second. + maxRetries = 5 // Maximum number of retries. + baseDelay = time.Second // Initial delay for exponential backoff. + maxDelay = 60 * time.Second // Maximum delay for backoff. +) + +var ( + rateLimiter = time.NewTicker(time.Second / maxRequestsPerSecond) + mu sync.Mutex +) + +type telegramReader struct { + ctx context.Context + log *log.Logger + client *gotgproto.Client + location *tg.InputDocumentFileLocation + start int64 + end int64 + next func() ([]byte, error) + buffer []byte + bytesread int64 + chunkSize int64 + i int64 + contentLength int64 + cache *BinaryCache +} + +// NewTelegramReader initializes a new telegramReader with the given parameters, including a BinaryCache. +func NewTelegramReader(ctx context.Context, client *gotgproto.Client, + location *tg.InputDocumentFileLocation, + start int64, end int64, contentLength int64, cache *BinaryCache) (io.ReadCloser, error) { + logger := log.New(os.Stdout, "tgReader-", 0) + + r := &telegramReader{ + ctx: ctx, + log: logger, + location: location, + client: client, + start: start, + end: end, + chunkSize: chunkSize, + contentLength: contentLength, + cache: cache, + } + r.log.Println("Initialization complete.") + r.next = r.partStream() + return r, nil +} + +// Close implements the io.Closer interface but doesn't perform any actions. +func (*telegramReader) Close() error { + return nil +} + +// Read reads the next chunk of data into the provided byte slice. +func (r *telegramReader) Read(p []byte) (n int, err error) { + + if r.bytesread == r.contentLength { + r.log.Println("Reached end of cacheFile (bytesread == contentLength).") + return 0, io.EOF + } + + if r.i >= int64(len(r.buffer)) { + r.buffer, err = r.next() + if err != nil { + r.log.Printf("Error while reading data: %v", err) + return 0, err + } + if len(r.buffer) == 0 { + r.next = r.partStream() + r.buffer, err = r.next() + if err != nil { + r.log.Printf("Error while reading data: %v", err) + return 0, err + } + } + r.i = 0 + } + n = copy(p, r.buffer[r.i:]) + r.i += int64(n) + r.bytesread += int64(n) + return n, nil +} + +// chunk requests a cacheFile chunk from the Telegram API starting at the specified offset or retrieves it from the cache. 
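// NewTelegramReader above hands back a plain io.ReadCloser, so serving an HTTP
// Range request is mostly a matter of building a reader for the requested byte
// window and copying it into the response. A minimal sketch of such a caller,
// assuming a hypothetical handler package inside this module that already has
// the gotgproto client, the file location, the file size, and a BinaryCache:

package web

import (
	"fmt"
	"io"
	"net/http"

	"webBridgeBot/internal/reader"

	"github.com/celestix/gotgproto"
	"github.com/gotd/td/tg"
)

// serveRange streams bytes [start, end] of a Telegram document to the client.
// Error handling and Range-header validation are trimmed for brevity.
func serveRange(w http.ResponseWriter, r *http.Request,
	client *gotgproto.Client, loc *tg.InputDocumentFileLocation,
	cache *reader.BinaryCache, start, end, fileSize int64) {
	rc, err := reader.NewTelegramReader(r.Context(), client, loc, start, end, fileSize, cache)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer rc.Close()

	w.Header().Set("Content-Length", fmt.Sprintf("%d", end-start+1))
	w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, fileSize))
	w.WriteHeader(http.StatusPartialContent)
	io.Copy(w, rc) // each Read pulls up to 1 MiB, served from the BinaryCache on a hit
}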
+func (r *telegramReader) chunk(offset int64, limit int64) ([]byte, error) { + // Check if the chunk is already in the cache + chunkID := offset / r.chunkSize + cachedChunk, err := r.cache.readChunk(r.location.ID, chunkID) + if err == nil { + r.log.Printf("Cache hit for chunk %d.", chunkID) + return cachedChunk, nil + } + + r.log.Printf("Cache miss for chunk %d, requesting from Telegram API.", chunkID) + + // If not in cache, request it from Telegram + req := &tg.UploadGetFileRequest{ + Offset: offset, + Limit: int(limit), + Location: r.location, + } + return r.downloadAndCacheChunk(req, chunkID) +} + +// downloadAndCacheChunk combines rate limiting and exponential backoff. +func (r *telegramReader) downloadAndCacheChunk(req *tg.UploadGetFileRequest, chunkID int64) ([]byte, error) { + delay := baseDelay // Start with the base delay for exponential backoff. + + for retryCount := 0; retryCount < maxRetries; retryCount++ { + // Rate limiting: Wait for the rate limiter to allow a new request. + mu.Lock() + <-rateLimiter.C + mu.Unlock() + + res, err := r.client.API().UploadGetFile(r.ctx, req) + if err != nil { + // Handle FLOOD_WAIT error by sleeping for the specified time and retrying. + if floodWait, ok := isFloodWaitError(err); ok { + r.log.Printf("FLOOD_WAIT error: retrying in %d seconds.", floodWait) + time.Sleep(time.Duration(floodWait) * time.Second) + continue + } + + // Handle transient errors with exponential backoff. + if isTransientError(err) { + r.log.Printf("Transient error: %v, retrying in %v", err, delay) + time.Sleep(delay) + delay = min(delay*2, maxDelay) // Increase delay with exponential backoff, capping at maxDelay. + continue + } + + // Return non-transient errors without retrying. + r.log.Printf("Error during chunk download: %v", err) + return nil, err + } + + switch result := res.(type) { + case *tg.UploadFile: + chunkData := result.Bytes + err = r.cache.writeChunk(r.location.ID, chunkID, chunkData) + if err != nil { + r.log.Printf("Error writing chunk to cache: %v", err) + } + return chunkData, nil + default: + return nil, fmt.Errorf("Unexpected response type: %T", r) + } + } + + // If all retries are exhausted, return an error. + return nil, fmt.Errorf("failed to download chunk %d after %d retries", chunkID, maxRetries) +} + +// partStream returns a function that reads cacheFile chunks sequentially. +func (r *telegramReader) partStream() func() ([]byte, error) { + start := r.start + end := r.end + offset := start - (start % r.chunkSize) + + firstPartCut := start - offset + lastPartCut := (end % r.chunkSize) + 1 + partCount := int((end - offset + r.chunkSize) / r.chunkSize) + currentPart := 1 + + readData := func() ([]byte, error) { + if currentPart > partCount { + return make([]byte, 0), nil + } + res, err := r.chunk(offset, r.chunkSize) + if err != nil { + return nil, err + } + if len(res) == 0 { + return res, nil + } else if partCount == 1 { + res = res[firstPartCut:lastPartCut] + } else if currentPart == 1 { + res = res[firstPartCut:] + } else if currentPart == partCount { + res = res[:lastPartCut] + } + + currentPart++ + offset += r.chunkSize + return res, nil + } + return readData +} + +// isFloodWaitError checks if the error is a FLOOD_WAIT error and returns the wait time if true. +func isFloodWaitError(err error) (int, bool) { + // Identify FLOOD_WAIT errors and extract wait time if applicable. 
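// Concretely, Telegram reports a rate-limit hit with error text that contains a
// marker such as "FLOOD_WAIT (17)", where the number is the wait in seconds.
// The extraction below mirrors the parsing done by isFloodWaitError and can be
// run on its own; the sample strings are made up for illustration.

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

var floodWaitRe = regexp.MustCompile(`FLOOD_WAIT \((\d+)\)`)

// waitSeconds returns the number of seconds Telegram asked us to wait,
// and whether the text matched the FLOOD_WAIT pattern at all.
func waitSeconds(errText string) (int, bool) {
	if m := floodWaitRe.FindStringSubmatch(errText); len(m) > 1 {
		if secs, err := strconv.Atoi(m[1]); err == nil {
			return secs, true
		}
	}
	return 0, false
}

func main() {
	fmt.Println(waitSeconds("rpc error: FLOOD_WAIT (17)")) // 17 true
	fmt.Println(waitSeconds("context canceled"))           // 0 false
}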
+ errText := err.Error() + matched, _ := regexp.MatchString(`FLOOD_WAIT \(\d+\)`, errText) + if matched { + // Extract the wait time in seconds using a regular expression. + re := regexp.MustCompile(`FLOOD_WAIT \((\d+)\)`) + match := re.FindStringSubmatch(errText) + if len(match) > 1 { + waitTime, err := strconv.Atoi(match[1]) + if err == nil { + return waitTime, true + } + } + } + return 0, false +} + +// isTransientError checks if an error is transient (e.g., network issues), meaning it might be resolved by retrying. +func isTransientError(err error) bool { + // Handle network-related errors + var netErr net.Error + if errors.As(err, &netErr) { + // Retry on network timeouts or temporary errors + return netErr.Timeout() || netErr.Temporary() + } + + // Check for specific system call errors that might indicate a transient issue + if errors.Is(err, syscall.ECONNRESET) || errors.Is(err, syscall.ECONNREFUSED) || errors.Is(err, syscall.ECONNABORTED) || errors.Is(err, syscall.ETIMEDOUT) { + return true + } + + // Handle context cancellation or deadline exceeded errors + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + // These are transient in the sense that they may succeed if retried, depending on the situation + return true + } + + // If none of the above conditions match, consider the error non-transient + return false +} + +// min returns the minimum of two time.Duration values. +func min(a, b time.Duration) time.Duration { + if a < b { + return a + } + return b +} diff --git a/internal/types/file.go b/internal/types/file.go new file mode 100644 index 0000000..758e5f9 --- /dev/null +++ b/internal/types/file.go @@ -0,0 +1,45 @@ +package types + +import ( + "crypto/md5" + "encoding/hex" + "reflect" + "strconv" + + "github.com/gotd/td/tg" +) + +type DocumentFile struct { + ID int64 + Location *tg.InputDocumentFileLocation + FileSize int64 + FileName string + MimeType string + VideoAttr tg.DocumentAttributeVideo +} + +type FileMetadata struct { + FileName string + FileSize int64 + MimeType string + FileID int64 +} + +func (h *FileMetadata) GenerateHash() string { + hasher := md5.New() + val := reflect.ValueOf(*h) + for i := 0; i < val.NumField(); i++ { + field := val.Field(i) + + var fieldValue []byte + switch field.Kind() { + case reflect.String: + fieldValue = []byte(field.String()) + case reflect.Int64: + fieldValue = []byte(strconv.FormatInt(field.Int(), 10)) + } + + hasher.Write(fieldValue) + } + return hex.EncodeToString(hasher.Sum(nil)) +} diff --git a/internal/types/user.go b/internal/types/user.go new file mode 100644 index 0000000..80632b0 --- /dev/null +++ b/internal/types/user.go @@ -0,0 +1,10 @@ +package types + +type User struct { + UserID int64 + ChatID int64 + FirstName string + LastName string + Username string + CreatedAt string +} diff --git a/internal/utils/hashing.go b/internal/utils/hashing.go new file mode 100644 index 0000000..e1a747a --- /dev/null +++ b/internal/utils/hashing.go @@ -0,0 +1,28 @@ +package utils + +import ( + "webBridgeBot/internal/types" +) + +// PackFile creates a packed string from the given file details. +func PackFile(fileName string, fileSize int64, mimeType string, fileID int64) string { + hashableFileStruct := types.FileMetadata{ + FileName: fileName, + FileSize: fileSize, + MimeType: mimeType, + FileID: fileID, + } + return hashableFileStruct.GenerateHash() +} + +// GetShortHash returns a shortened version of the provided hash. 
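// Together with PackFile above and the two helpers that follow, a caller can
// derive a stable identifier from file metadata, publish only a short prefix of
// it, and later validate an incoming short hash. A minimal usage sketch (a
// throwaway main inside this module; the hash length of 8 is an arbitrary choice):

package main

import (
	"fmt"

	"webBridgeBot/internal/utils"
)

func main() {
	// Deterministic hash derived from made-up file metadata.
	full := utils.PackFile("clip.mp4", 123456789, "video/mp4", 42)

	// Only a short prefix needs to be exposed, e.g. inside a URL.
	short := utils.GetShortHash(full, 8)

	// An incoming short hash can later be checked against the same metadata.
	fmt.Println(utils.CheckHash(short, full, 8)) // true
}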
+func GetShortHash(fullHash string, hashLength int) string { + if len(fullHash) < hashLength { + return fullHash + } + return fullHash[:hashLength] +} + +func CheckHash(inputHash string, expectedHash string, hashLength int) bool { + return inputHash == GetShortHash(expectedHash, hashLength) +} diff --git a/internal/utils/helpers.go b/internal/utils/helpers.go new file mode 100644 index 0000000..95b8f88 --- /dev/null +++ b/internal/utils/helpers.go @@ -0,0 +1,167 @@ +package utils + +import ( + "context" + "errors" + "fmt" + "github.com/celestix/gotgproto/ext" + "math/rand" + "webBridgeBot/internal/cache" + "webBridgeBot/internal/types" + + "github.com/celestix/gotgproto" + "github.com/celestix/gotgproto/storage" + "github.com/gotd/td/tg" +) + +// https://stackoverflow.com/a/70802740/15807350 +func Contains[T comparable](s []T, e T) bool { + for _, v := range s { + if v == e { + return true + } + } + return false +} + +// GetMessage fetches the message by the specified message ID +func GetMessage(ctx context.Context, client *gotgproto.Client, messageID int) (*tg.Message, error) { + // Fetch messages using the client API + messages, err := client.API().MessagesGetMessages(ctx, []tg.InputMessageClass{ + &tg.InputMessageID{ID: messageID}, + }) + if err != nil { + return nil, err + } + + // Attempt to cast the response to the expected type + if msgs, ok := messages.(*tg.MessagesMessages); ok { + // Iterate over the messages to find the one with the matching ID + for _, msg := range msgs.Messages { + if m, ok := msg.(*tg.Message); ok && m.GetID() == messageID { + return m, nil + } + } + } + + return nil, fmt.Errorf("message not found") +} + +func FileFromMedia(media tg.MessageMediaClass) (*types.DocumentFile, error) { + switch media := media.(type) { + case *tg.MessageMediaDocument: + document, ok := media.Document.AsNotEmpty() + if !ok { + return nil, fmt.Errorf("unexpected type %T", media) + } + var fileName string + for _, attribute := range document.Attributes { + if name, ok := attribute.(*tg.DocumentAttributeFilename); ok { + fileName = name.FileName + break + } + } + + var videoAttr tg.DocumentAttributeVideo + for _, attribute := range document.Attributes { + if name, ok := attribute.(*tg.DocumentAttributeFilename); ok { + fileName = name.FileName + } + if documentAttributeVideo, ok := attribute.(*tg.DocumentAttributeVideo); ok { + videoAttr = *documentAttributeVideo + } + } + + return &types.DocumentFile{ + Location: document.AsInputDocumentFileLocation(), + FileSize: document.Size, + FileName: fileName, + MimeType: document.MimeType, + ID: document.ID, + VideoAttr: videoAttr, + }, nil + // TODO: add photo support + } + return nil, fmt.Errorf("unexpected type %T", media) +} + +func FileFromMessage(ctx context.Context, client *gotgproto.Client, messageID int) (*types.DocumentFile, error) { + key := fmt.Sprintf("file:%d:%d", messageID, client.Self.ID) + var cachedMedia types.DocumentFile + err := cache.GetCache().Get(key, &cachedMedia) + if err == nil { + return &cachedMedia, nil + } + message, err := GetMessage(ctx, client, messageID) + if err != nil { + return nil, err + } + file, err := FileFromMedia(message.Media) + if err != nil { + return nil, err + } + err = cache.GetCache().Set( + key, + file, + 3600, + ) + if err != nil { + return nil, err + } + return file, nil + // TODO: add photo support +} + +func ForwardMessages(ctx *ext.Context, fromChatId, logChannelID int64, messageID int) (*tg.Updates, error) { + fromPeer := ctx.PeerStorage.GetInputPeerById(fromChatId) + if 
fromPeer.Zero() { + return nil, fmt.Errorf("fromChatId: %d is not a valid peer", fromChatId) + } + toPeer, err := GetLogChannelPeer(ctx, ctx.Raw, ctx.PeerStorage, logChannelID) + if err != nil { + return nil, err + } + update, err := ctx.Raw.MessagesForwardMessages(ctx, &tg.MessagesForwardMessagesRequest{ + RandomID: []int64{rand.Int63()}, + FromPeer: fromPeer, + ID: []int{messageID}, + ToPeer: &tg.InputPeerChannel{ChannelID: toPeer.ChannelID, AccessHash: toPeer.AccessHash}, + }) + if err != nil { + return nil, err + } + return update.(*tg.Updates), nil +} + +func GetLogChannelPeer(ctx context.Context, api *tg.Client, peerStorage *storage.PeerStorage, logChannelID int64) (*tg.InputChannel, error) { + cachedInputPeer := peerStorage.GetInputPeerById(logChannelID) + + switch peer := cachedInputPeer.(type) { + case *tg.InputPeerEmpty: + break + case *tg.InputPeerChannel: + return &tg.InputChannel{ + ChannelID: peer.ChannelID, + AccessHash: peer.AccessHash, + }, nil + default: + return nil, errors.New("unexpected type of input peer") + } + inputChannel := &tg.InputChannel{ + ChannelID: logChannelID, + } + channels, err := api.ChannelsGetChannels(ctx, []tg.InputChannelClass{inputChannel}) + if err != nil { + return nil, err + } + if len(channels.GetChats()) == 0 { + return nil, errors.New("no channels found") + } + channel, ok := channels.GetChats()[0].(*tg.Channel) + if !ok { + return nil, errors.New("type assertion to *tg.Channel failed") + } + // Bruh, I literally have to call library internal functions at this point + peerStorage.AddPeer(channel.GetID(), channel.AccessHash, storage.TypeChannel, "") + return channel.AsInput(), nil +} diff --git a/main.go b/main.go index cd1f43d..2259764 100644 --- a/main.go +++ b/main.go @@ -1,984 +1,38 @@ package main import ( - "encoding/json" - "flag" "fmt" - "html/template" - "io" + "github.com/spf13/cobra" "log" - "net/http" "os" - "path/filepath" - "sort" - "strconv" - "strings" - "time" - - "github.com/gorilla/mux" - "github.com/gorilla/websocket" - "github.com/zelenin/go-tdlib/client" + "webBridgeBot/internal/bot" + "webBridgeBot/internal/config" ) -type Config struct { - ApiID int - ApiHash string - BotToken string - BaseURL string - Port string - MaxFilesFolderSizeGB int64 - TdlibParameters *client.SetTdlibParametersRequest -} - -type TelegramBot struct { - config *Config - tdlibClient *client.Client - urlHistory map[int64]FileIdMeta -} - -type FileIdMeta map[int32]FileMeta - -type FileMeta struct { - URL string - MIMEType string - IsDownloadingCompleted bool - Size int64 - Duration int32 - Width int32 - Height int32 - FileName string -} - -var wsClients = make(map[int64]*websocket.Conn) // chatID to WebSocket connection - -var upgrader = websocket.Upgrader{ - CheckOrigin: func(r *http.Request) bool { - return true - }, -} +var ( + cfgFile string + cfg config.Configuration +) func main() { - config := loadConfig() - initializeAndRunBot(config) -} - -func loadConfig() Config { - var ( - apiID = flag.Int("apiID", 0, "Telegram API ID") - apiHash = flag.String("apiHash", "", "Telegram API Hash") - botToken = flag.String("botToken", "", "Telegram Bot Token") - baseURL = flag.String("baseURL", "", "Base URL for the webhook") - port = flag.String("port", "8080", "Port on which the bot runs") - useIPAsBaseURL = flag.Bool("local", false, "Use the machine's IP address as the base URL") - maxFolderSizeGB = flag.Int64("maxFolderSizeGB", 10, "Maximum size of the download folder in gigabytes") - ) - flag.Parse() - - if *apiID == 0 { - log.Fatal("apiID flag is 
required and not set") - } - if *apiHash == "" { - log.Fatal("apiHash flag is required and not set") - } - if *botToken == "" { - log.Fatal("botToken flag is required and not set") - } - if *baseURL == "" { - log.Fatal("baseURL flag is required and not set") - } - - if *useIPAsBaseURL { - ips, err := findIPAddresses() - if err != nil { - fmt.Println("Error finding IP addresses:", err) - os.Exit(1) - } - if len(ips) > 0 { - *baseURL = "http://" + ips[0] + ":" + *port - } else { - fmt.Println("No valid IP address found. Using default base URL.") - } - } - - return Config{ - ApiID: *apiID, - ApiHash: *apiHash, - BotToken: *botToken, - BaseURL: *baseURL, - Port: *port, - MaxFilesFolderSizeGB: *maxFolderSizeGB, - TdlibParameters: &client.SetTdlibParametersRequest{ - UseTestDc: false, - DatabaseDirectory: filepath.Join(".tdlib", "database"), - FilesDirectory: filepath.Join(".tdlib", "files"), - UseFileDatabase: true, - UseChatInfoDatabase: true, - UseMessageDatabase: true, - UseSecretChats: false, - ApiId: int32(*apiID), - ApiHash: *apiHash, - SystemLanguageCode: "en", - DeviceModel: "Server", - SystemVersion: "1.0.0", - ApplicationVersion: "1.0.0", - EnableStorageOptimizer: true, - IgnoreFileNames: false, - }, - } -} - -func initializeAndRunBot(config Config) { - bot := &TelegramBot{ - config: &config, - urlHistory: make(map[int64]FileIdMeta), - } - bot.Run() -} - -func (b *TelegramBot) Run() { - // Initialize the TDLib client - authorizer := client.BotAuthorizer(b.config.BotToken) - authorizer.TdlibParameters <- b.config.TdlibParameters - b.tdlibClient = b.initTDLibClient(authorizer) - - // Get the bot's information - me := b.getMe() - log.Printf("Authorized as bot: %s", strings.Join(me.Usernames.ActiveUsernames, ", ")) - - // Start the web server - go b.startWebServer() - - // Start processing updates - listener := b.tdlibClient.GetListener() - defer listener.Close() - b.processUpdates(listener) -} - -func (b *TelegramBot) initTDLibClient(authorizer client.AuthorizationStateHandler) *client.Client { - // Set the log verbosity level - _, err := client.SetLogVerbosityLevel(&client.SetLogVerbosityLevelRequest{ - NewVerbosityLevel: 1, - }) - if err != nil { - log.Fatalf("SetLogVerbosityLevel error: %s", err) - } - - // Create a new TDLib client - tdlibClient, err := client.NewClient(authorizer) - if err != nil { - log.Fatalf("NewClient error: %s", err) - } - - // Get the TDLib version - optionValue, err := client.GetOption(&client.GetOptionRequest{ - Name: "version", - }) - if err != nil { - log.Fatalf("GetOption error: %s", err) - } - log.Printf("TDLib version: %s", optionValue.(*client.OptionValueString).Value) - - return tdlibClient -} - -func (b *TelegramBot) getMe() *client.User { - me, err := b.tdlibClient.GetMe() - if err != nil { - log.Fatalf("GetMe error: %s", err) - } - return me -} - -func (b *TelegramBot) processUpdates(listener *client.Listener) { - for update := range listener.Updates { - switch update.GetType() { - case client.TypeUpdateNewMessage: - log.Printf("Received UpdateNewMessage: %#v", update) - updateNewMessage := update.(*client.UpdateNewMessage) - message := updateNewMessage.Message - b.processMessage(message.ChatId, message) - case client.TypeUpdateUser: - log.Printf("Received UpdateUser: %#v", update) - updateUser := update.(*client.UpdateUser) - b.processUpdateUser(updateUser) - case client.TypeUpdateFile: - updateFile := update.(*client.UpdateFile) - b.processUpdateFile(updateFile) - break - case client.TypeMessage: - break - case client.TypeUpdateNewChat: - break - 
case client.TypeUpdateMessageSendSucceeded: - break - case client.TypeError: - errorMessage := update.(*client.Error) - log.Printf("Telegram Error Message: %d, %s", errorMessage.Code, errorMessage.Message) - break - default: - log.Printf("Unhandled update: %#v", update) - PrintAllFields(update) - } - } -} - -func (b *TelegramBot) processUpdateUser(update *client.UpdateUser) { - var activeUsernames []string - if update.User.Usernames != nil { - activeUsernames = update.User.Usernames.ActiveUsernames - } - log.Printf("UpdateUser - UserID: %d, FirstName: %s, LastName: %s, activeUsernames: %v", update.User.Id, update.User.FirstName, update.User.LastName, activeUsernames) - // Note: Access only the exported fields -} - -func (b *TelegramBot) processMessage(chatID int64, message *client.Message) { - switch message.Content.MessageContentType() { - case client.TypeMessageAudio: - audio := message.Content.(*client.MessageAudio).Audio - log.Printf("Audio: %s", audio.Audio.Id) - b.handleForwardedAudio(chatID, message) - case client.TypeMessageDocument: - document := message.Content.(*client.MessageDocument).Document - log.Printf("Document: %s", document.Document.Id) - b.handleForwardedDocument(chatID, message) - case client.TypeMessageVideo: - video := message.Content.(*client.MessageVideo).Video - log.Printf("Video: %d", video.Video.Id) - b.handleForwardedVideo(chatID, message) - case client.TypeMessagePhoto: - photo := message.Content.(*client.MessagePhoto).Photo - bestQualityPhoto := photo.Sizes[len(photo.Sizes)-1] - log.Printf("Photo: %d", bestQualityPhoto.Photo.Id) - b.handleForwardedPhoto(chatID, message) - case client.TypeMessageText: - text := message.Content.(*client.MessageText).Text.Text - log.Printf("Text: %s", text) - b.handleCommand(message) - - default: - log.Printf("Unhandled content type: %s", message.Content.MessageContentType()) - // Optionally handle unknown types - } -} - -func (b *TelegramBot) processUpdateFile(updateFile *client.UpdateFile) { - file := updateFile.File - localFile := file.Local - fileId := file.Id - - // Check if the file download is completed and update the URL download status - if localFile.IsDownloadingCompleted { - log.Printf("Download completed for file ID %d at path %s", fileId, localFile.Path) - b.updateURLDownloadStatus(fileId, true) - } else { - // If the download is not completed, you might want to update the status as well. Depending on your needs, - // you might skip this or handle it differently. - log.Printf("Downloading... 
File ID %d, downloaded %d of %d bytes.", fileId, localFile.DownloadedSize, file.Size) - b.updateURLDownloadStatus(fileId, false) - } -} - -func (b *TelegramBot) handleWebSocket(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - chatIDStr, ok := vars["chatID"] - if !ok { - http.Error(w, "Chat ID is required", http.StatusBadRequest) - return - } - - chatID, err := strconv.ParseInt(chatIDStr, 10, 64) - if err != nil { - http.Error(w, "Invalid chat ID", http.StatusBadRequest) - return - } - - ws, err := upgrader.Upgrade(w, r, nil) - if err != nil { - log.Println(err) - return - } - defer ws.Close() - - // Register client - wsClients[chatID] = ws - - for { - // Keep connection alive or handle control messages if necessary - // For example, read messages to prevent the connection from closing - messageType, p, err := ws.ReadMessage() - if err != nil { - log.Println(err) - delete(wsClients, chatID) - break - } - // Echo the message back (Optional, for keeping the connection alive) - if err := ws.WriteMessage(messageType, p); err != nil { - log.Println(err) - break - } - } -} - -func (b *TelegramBot) handleForwardedAudio(chatID int64, message *client.Message) { - audio := message.Content.(*client.MessageAudio).Audio - fileID := audio.Audio.Id - fileSize := audio.Audio.Size - fileURL := b.getFileURL(chatID, fileID) - - b.storeURLInHistory(chatID, fileID, fileURL, audio.MimeType, fileSize, audio.Duration, -1, -1, audio.FileName) - b.sendMessage(message.ChatId, fileURL) - - // Construct the message with the URL and type - wsMsg := map[string]string{ - "fileId": strconv.Itoa(int(fileID)), - "url": fileURL, - "mimeType": audio.MimeType, - "duration": strconv.Itoa(int(audio.Duration)), - "fileName": audio.FileName, - } - b.publishOverWS(chatID, wsMsg) -} - -func (b *TelegramBot) handleForwardedDocument(chatID int64, message *client.Message) { - document := message.Content.(*client.MessageDocument).Document - fileID := document.Document.Id - fileSize := document.Document.Size - fileURL := b.getFileURL(chatID, fileID) - b.storeURLInHistory(chatID, fileID, fileURL, document.MimeType, fileSize, -1, -1, -1, document.FileName) - b.sendMessage(message.ChatId, fileURL) -} - -func (b *TelegramBot) handleForwardedVideo(chatID int64, message *client.Message) { - videoContent := message.Content.(*client.MessageVideo) - video := videoContent.Video - fileID := video.Video.Id - fileSize := video.Video.Size - fileURL := b.getFileURL(chatID, video.Video.Id) - - // Store URL in history and send the message - b.storeURLInHistory(chatID, fileID, fileURL, video.MimeType, fileSize, video.Duration, video.Width, video.Height, video.FileName) - b.sendMessage(message.ChatId, fileURL) - - // Construct the message with the URL and type - wsMsg := map[string]string{ - "fileId": strconv.Itoa(int(video.Video.Id)), - "url": fileURL, - "mimeType": video.MimeType, - "duration": strconv.Itoa(int(video.Duration)), - "width": strconv.Itoa(int(video.Width)), - "height": strconv.Itoa(int(video.Height)), - "fileName": video.FileName, - } - b.publishOverWS(chatID, wsMsg) -} - -func (b *TelegramBot) handleForwardedPhoto(chatID int64, message *client.Message) { - photo := message.Content.(*client.MessagePhoto).Photo - bestQualityPhoto := photo.Sizes[len(photo.Sizes)-1] - fileID := bestQualityPhoto.Photo.Id - fileSize := bestQualityPhoto.Photo.Size - fileURL := b.getFileURL(chatID, fileID) - - // Example MIME type determination (simplified) - mimeType := "image/jpeg" // Default MIME type; consider a more dynamic approach as needed 
- - b.storeURLInHistory(chatID, fileID, fileURL, mimeType, fileSize, -1, bestQualityPhoto.Width, bestQualityPhoto.Height, "") - b.sendMessage(message.ChatId, fileURL) - - // Construct the message with the URL and type - wsMsg := map[string]string{ - "fileId": strconv.Itoa(int(fileID)), - "url": fileURL, - "mimeType": mimeType, - "width": strconv.Itoa(int(bestQualityPhoto.Width)), - "height": strconv.Itoa(int(bestQualityPhoto.Height)), - } - b.publishOverWS(chatID, wsMsg) -} - -func (b *TelegramBot) publishOverWS(chatID int64, message map[string]string) { - if client, ok := wsClients[chatID]; ok { - // Convert the message to JSON - messageJSON, err := json.Marshal(message) - if err != nil { - log.Println("Error marshalling message:", err) - return - } - - // Send the message over WebSocket - if err := client.WriteMessage(websocket.TextMessage, messageJSON); err != nil { - log.Println("Error sending message:", err) - delete(wsClients, chatID) - client.Close() - } - } -} - -func (b *TelegramBot) storeURLInHistory(chatID int64, fileID int32, url string, mimeType string, size int64, duration int32, width int32, height int32, fileName string) { - // Initialize the chatID entry in urlHistory if it doesn't exist - if _, ok := b.urlHistory[chatID]; !ok { - b.urlHistory[chatID] = make(FileIdMeta) - } - - // Check if the URL is already in the history for the given chatID - urlExists := false - for _, fileMeta := range b.urlHistory[chatID] { - if fileMeta.URL == url { - urlExists = true - break - } - } - - // Only add the URL, MIME type, IsDownloadingCompleted status, and fileId if the URL does not already exist - if !urlExists { - b.urlHistory[chatID][fileID] = FileMeta{ - URL: url, - MIMEType: mimeType, - IsDownloadingCompleted: false, - Size: size, - Duration: duration, - Width: width, - Height: height, - FileName: fileName, - } - } -} - -// Update the download status of a URL associated with a fileId in the urlHistory -func (b *TelegramBot) updateURLDownloadStatus(fileId int32, isDownloadingCompleted bool) { - // Iterate through each chat ID in the urlHistory - for _, fileIdMeta := range b.urlHistory { - // Check if the fileId exists in the FileIdMeta for the current chatID - if fileMeta, ok := fileIdMeta[fileId]; ok { - // Update the IsDownloadingCompleted status for the fileId - fileMeta.IsDownloadingCompleted = isDownloadingCompleted - // Since maps store references to objects, the original map is updated - // but to follow good practice and ensure clarity, update the map explicitly - fileIdMeta[fileId] = fileMeta - } - } -} - -func (b *TelegramBot) getMetaByURL(chatID int64, url string) (FileMeta, error) { - fmt.Printf("chatID: %d, url: %s, b.history: %v\n", chatID, url, b.urlHistory) - if urlMIMEs, ok := b.urlHistory[chatID]; ok { - for _, fileMeta := range urlMIMEs { - if fileMeta.URL == url { - return fileMeta, nil - } - } - return FileMeta{}, fmt.Errorf("URL not found in history") - } - return FileMeta{}, fmt.Errorf("No history for chatID") -} - -func (b *TelegramBot) handleCommand(message *client.Message) { - chatID := message.ChatId - webURL := fmt.Sprintf("%s/%d", b.config.BaseURL, chatID) - var text string - // Check if the command is '/start' - if strings.HasPrefix(message.Content.(*client.MessageText).Text.Text, "/start") { - text = "Welcome to WebBridgeBot, your bridge between Telegram and the Web!\n\n" - text += "Find out more about WebBridgeBot on GitHub: https://github.com/mshafiee/webbridgebot\n\n" - text += "Access your player and more features here:\n" + webURL + "\n\n" - } - 
- if strings.HasPrefix(message.Content.(*client.MessageText).Text.Text, "/url") { - text = "Access your player and more features here:\n" + webURL - } - - b.sendMessage(chatID, text) -} - -func (b *TelegramBot) getFileURL(chatID int64, fileID int32) string { - return fmt.Sprintf("%s/%d/%d", b.config.BaseURL, chatID, fileID) -} - -func (b *TelegramBot) sendMessage(chatID int64, text string) { - log.Printf("Sending message to chat %d: %s", chatID, text) - _, err := b.tdlibClient.SendMessage(&client.SendMessageRequest{ - ChatId: chatID, - InputMessageContent: &client.InputMessageText{ - Text: &client.FormattedText{ - Text: text, - Entities: nil, - }, - LinkPreviewOptions: nil, - ClearDraft: false, - }, - }) - if err != nil { - log.Printf("Error sending message: %v", err) - } -} - -func (b *TelegramBot) startWebServer() { - router := mux.NewRouter() - // Define the WebSocket route explicitly - router.HandleFunc("/ws/{chatID}", b.handleWebSocket) - - // Define other routes - router.HandleFunc("/{chatID}/{fileID}", b.handleFileDownload) - router.HandleFunc("/{chatID}", b.handlePlayer) - router.HandleFunc("/{chatID}/", b.handlePlayer) - - // Make sure the WebSocket route is not being caught by a more generic handler - - port := b.config.Port - log.Printf("Web server started on port %s", port) - if err := http.ListenAndServe(fmt.Sprintf(":%s", port), router); err != nil { - log.Panic(err) - } -} -func (b *TelegramBot) handleFileDownload(w http.ResponseWriter, r *http.Request) { - // Extract variables from request - chatID, fileID, err := b.extractRequestParameters(w, r) - if err != nil { - // Error already handled within extractRequestParameters - return - } - - // Download and open file - fp, err := b.downloadAndOpenFile(fileID) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - defer fp.Close() - - // Get file metadata - fileSize, mimeType, err := b.getFileMetadata(fp, chatID, fileID) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - // Handle range requests for partial content delivery - if rangeHeader := r.Header.Get("Range"); rangeHeader != "" { - b.servePartialContent(w, fp, rangeHeader, fileSize, mimeType) - } else { - // Serve the entire file - w.Header().Set("Content-Length", strconv.FormatInt(fileSize, 10)) - w.Header().Set("Content-Type", mimeType) - io.Copy(w, fp) // Stream the whole file - } -} - -func (b *TelegramBot) extractRequestParameters(w http.ResponseWriter, r *http.Request) (chatID int64, fileID int32, err error) { - vars := mux.Vars(r) - chatIDStr, fileIDStr := vars["chatID"], vars["fileID"] - - chatID, err = strconv.ParseInt(chatIDStr, 10, 64) - if err != nil { - http.Error(w, "Invalid chat ID", http.StatusBadRequest) - return 0, 0, err - } - - fileIDInt64, err := strconv.ParseInt(fileIDStr, 10, 32) - if err != nil { - http.Error(w, "Invalid file ID", http.StatusBadRequest) - return 0, 0, err - } - - return chatID, int32(fileIDInt64), nil -} - -func (b *TelegramBot) downloadAndOpenFile(fileID int32) (*os.File, error) { - // Attempt to clean up the download folder before downloading a new file - if b.isCleanupNeeded() { - go func() { - err := b.cleanUpDownloadFolderIfNeeded() + rootCmd := &cobra.Command{ + Use: "telegram-bot", + Short: "Telegram Bot", + Run: func(cmd *cobra.Command, args []string) { + cfg = config.LoadConfig() + b, err := bot.NewTelegramBot(&cfg) if err != nil { - log.Printf("Error cleaning up download folder: %v", err) - } - }() - } - - maxRetries := 10 - for i := 0; i 
< maxRetries; i++ { - file, err := b.tdlibClient.DownloadFile(&client.DownloadFileRequest{ - FileId: fileID, - Priority: 1, - }) - if err != nil { - if i == maxRetries-1 { - return nil, fmt.Errorf("Error downloading file after %d attempts: %v", maxRetries, err) + log.Fatalf("Error initializing Telegram bot: %v", err) } - time.Sleep(time.Duration(i+1) * time.Second) - continue - } - - fp, err := os.Open(file.Local.Path) - if err == nil { - return fp, nil - } - if i == maxRetries-1 { - return nil, fmt.Errorf("Error opening file after %d attempts: %v", maxRetries, err) - } - time.Sleep(time.Duration(i+1) * time.Second) - } - return nil, fmt.Errorf("Unhandled error in downloadAndOpenFile") -} - -func (b *TelegramBot) getFileMetadata(fp *os.File, chatID int64, fileID int32) (int64, string, error) { - fileInfo, err := fp.Stat() - if err != nil { - return 0, "", fmt.Errorf("Error getting file info: %v", err) - } - - fileMeta, err := b.getMetaByURL(chatID, b.getFileURL(chatID, fileID)) - if err != nil { - // Default MIME type if metadata is not found - return fileInfo.Size(), "application/octet-stream", nil - } - - return fileMeta.Size, fileMeta.MIMEType, nil -} - -func (b *TelegramBot) servePartialContent(w http.ResponseWriter, fp *os.File, rangeHeader string, fileSize int64, mimeType string) { - start, end, err := parseRange(rangeHeader, fileSize) - if err != nil { - http.Error(w, "Invalid Range Header", http.StatusBadRequest) - return - } - - contentLength := end - start + 1 - w.Header().Set("Content-Length", strconv.FormatInt(contentLength, 10)) - w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, fileSize)) - w.Header().Set("Content-Type", mimeType) - w.WriteHeader(http.StatusPartialContent) - - fp.Seek(start, 0) - io.CopyN(w, fp, contentLength) -} - -// parseRange parses a Range header string and returns the start and end byte positions -func parseRange(rangeStr string, fileSize int64) (start, end int64, err error) { - start = 0 - end = fileSize - 1 - rangeStr = strings.TrimPrefix(rangeStr, "bytes=") - parts := strings.Split(rangeStr, "-") - if parts[0] != "" { - start, err = strconv.ParseInt(parts[0], 10, 64) - if err != nil { - return - } - } - if parts[1] != "" { - end, err = strconv.ParseInt(parts[1], 10, 64) - if err != nil { - return - } - } - if start > end || end >= fileSize { - err = fmt.Errorf("invalid range") - } - return -} - -func (b *TelegramBot) isCleanupNeeded() bool { - var totalSize int64 - err := filepath.Walk(b.config.TdlibParameters.FilesDirectory, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() { - totalSize += info.Size() - } - return nil - }) - if err != nil { - log.Printf("Error walking through download folder: %v", err) - return false - } - // Convert maxFolderSize from GB to bytes for comparison - maxFolderSize := b.config.MaxFilesFolderSizeGB * 1024 * 1024 * 1024 - return totalSize > maxFolderSize -} - -func (b *TelegramBot) cleanUpDownloadFolderIfNeeded() error { - var totalSize int64 - fileList := make([]struct { - path string - modTime time.Time - size int64 - }, 0) - - // Convert maxFolderSize from GB to bytes for comparison - maxFolderSize := b.config.MaxFilesFolderSizeGB * 1024 * 1024 * 1024 - - // Walk through the download folder to calculate total size and collect file info - err := filepath.Walk(b.config.TdlibParameters.FilesDirectory, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() { - totalSize += info.Size() - 
fileList = append(fileList, struct { - path string - modTime time.Time - size int64 - }{ - path, info.ModTime(), info.Size(), - }) - } - return nil - }) - - if err != nil { - return err - } - - // Sort the files by modification time, oldest first - sort.Slice(fileList, func(i, j int) bool { - return fileList[i].modTime.Before(fileList[j].modTime) - }) - - // Remove files until the total size is within the limit - for totalSize > maxFolderSize && len(fileList) > 0 { - oldestFile := fileList[0] - fileList = fileList[1:] - err := os.Remove(oldestFile.path) - if err != nil { - return err - } - totalSize -= oldestFile.size - log.Printf("Removed file %s (oldest first) to reduce download folder size", oldestFile.path) - } - - return nil -} - -func (b *TelegramBot) handlePlayer(w http.ResponseWriter, r *http.Request) { - log.Printf("Received request for player: %s", r.URL.Path) - - vars := mux.Vars(r) - chatIDStr, ok := vars["chatID"] - if !ok { - http.Error(w, "Chat ID is required", http.StatusBadRequest) - return - } - - chatID, err := strconv.ParseInt(chatIDStr, 10, 64) - if err != nil { - http.Error(w, "Invalid chat ID", http.StatusBadRequest) - return + b.Run() + }, } - // Define the HTML template with embedded JavaScript - tmpl := ` - - - - - - Media Player - - - -

-	[removed inline player template: the HTML/JS markup did not survive extraction; the only visible text was the "WebBridgeBot" heading and the "Chat ID: {{.ChatID}}; Waiting for media..." status line]
- - - - -` - t, err := template.New("webpage").Parse(tmpl) - if err != nil { - http.Error(w, "Failed to parse template", http.StatusInternalServerError) - return - } + rootCmd.Flags().StringVarP(&cfgFile, "cfg", "c", "", "cfg file (default is .env)") - err = t.Execute(w, map[string]interface{}{ - "ChatID": chatID, - }) - if err != nil { - http.Error(w, "Failed to execute template", http.StatusInternalServerError) - return + if err := rootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(1) } } diff --git a/templates/player.html b/templates/player.html new file mode 100644 index 0000000..e6f307f --- /dev/null +++ b/templates/player.html @@ -0,0 +1,203 @@ + + + + + + WebBridgeBot Media Player - {{.ChatID}} + + + +

+	[templates/player.html body: the HTML/JS markup did not survive extraction; the visible text was the "WebBridgeBot" heading and the "Chat ID: {{.ChatID}}; Waiting for media..." status line]
\ No newline at end of file
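// For reference, the WebSocket payload the player page listens for was, before
// this refactor, a flat JSON object of string fields (see the removed
// publishOverWS calls in main.go); the refactored bot presumably emits an
// equivalent shape. A small sketch of that message, with made-up values:

package main

import (
	"encoding/json"
	"fmt"
)

// wsMessage mirrors the map[string]string payload the old main.go pushed to the
// player; the field names match the removed wsMsg keys.
type wsMessage struct {
	FileID   string `json:"fileId"`
	URL      string `json:"url"`
	MimeType string `json:"mimeType"`
	Duration string `json:"duration,omitempty"`
	Width    string `json:"width,omitempty"`
	Height   string `json:"height,omitempty"`
	FileName string `json:"fileName,omitempty"`
}

func main() {
	b, _ := json.Marshal(wsMessage{
		FileID:   "42",
		URL:      "https://example.com/12345/42",
		MimeType: "video/mp4",
		Duration: "120",
		FileName: "clip.mp4",
	})
	fmt.Println(string(b))
}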