Compare commits
10 Commits: fast-api-t...ccxt

| Author | SHA1 | Date |
|---|---|---|
| | 9ef0812480 | |
| | 05bb4d9359 | |
| | f4961413bc | |
| | 37d3e7ec30 | |
| | 5466470a1e | |
| | 7e3dc05259 | |
| | 50806b9eb8 | |
| | fe82bc0fe6 | |
| | 26381a3329 | |
| | 10da7af267 | |
2 changes: .github/ISSUE_TEMPLATE/bug_report.md (vendored)
```diff
@@ -8,7 +8,7 @@ assignees: ''
 ---
 
 <!--
-IMPORTANT: Please open an issue ONLY if you find something wrong with the source code. For questions and feedback use Discord (https://jesse.trade/discord). Also make sure to give the documentation (https://docs.jesse.trade/) and FAQ (https://jesse.trade/help) a good read to eliminate the possibility of causing the problem due to wrong usage. Make sure you are using the most recent version `pip show jesse` and updated all requirements `pip install -r https://raw.githubusercontent.com/jesse-ai/jesse/master/requirements.txt`.
+IMPORTANT: Please open an issue ONLY if you find something wrong with the source code. For questions and feedback use the Forum (https://forum.jesse.trade/) or Discord (https://jesse.trade/discord). Also make sure to give the documentation (https://docs.jesse.trade/) a good read to eliminate the possibility of causing the problem due to wrong usage. Make sure you are using the most recent version `pip show jesse` and updated all requirements `pip install -r https://raw.githubusercontent.com/jesse-ai/jesse/master/requirements.txt`.
 -->
 
 **Describe the bug**
```
2 changes: .github/ISSUE_TEMPLATE/feature_request.md (vendored)
```diff
@@ -8,7 +8,7 @@ assignees: ''
 ---
 
 <!---
-Make sure to check the roadmap (https://docs.jesse.trade/docs/roadmap.html) and the Trello boards linked there whether your idea is already listed and give Jesse's documentation a good read to make sure you don't request something that's already possible. If possible use Discord (https://jesse.trade/discord) to discuss your ideas with the community.
+Make sure to check the Projects tab if your idea is already listed and give Jesse's documentation a good read to make sure you don't request something that's already possible.
 -->
 
 **Is your feature request related to a problem? Please describe.**
```
11 changes: .github/dependabot.yml (vendored)
@@ -1,11 +0,0 @@ (entire file removed)
```yaml
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates

version: 2
updates:
  - package-ecosystem: "pip" # See documentation for possible values
    directory: "/" # Location of package manifests
    schedule:
      interval: "daily"
```
98 changes: .github/workflows/codeql-analysis.yml (vendored)
@@ -1,98 +0,0 @@ (entire file removed)
```yaml
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"

on:
  push:
    branches: [ master ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ master ]
  schedule:
    - cron: '0 0 1 * *'

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest
    permissions:
      actions: read
      contents: read
      security-events: write

    strategy:
      fail-fast: false
      matrix:
        language: [ 'python' ]
        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
        # Learn more about CodeQL language support at https://git.io/codeql-language-support

    steps:
      - name: Checkout repository
        uses: actions/checkout@v2
      - name: Cache pip
        uses: actions/cache@v2
        with:
          path: ${{ matrix.path }}
          key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
          restore-keys: |
            ${{ runner.os }}-pip-
      - name: Install ta-lib
        run: |
          if ([ "$RUNNER_OS" = "macOS" ]); then
            brew install ta-lib
          fi
          if ([ "$RUNNER_OS" = "Linux" ]); then
            if [ ! -f "$GITHUB_WORKSPACE/ta-lib/src" ]; then wget http://prdownloads.sourceforge.net/ta-lib/ta-lib-0.4.0-src.tar.gz -q && tar -xzf ta-lib-0.4.0-src.tar.gz; fi
            cd ta-lib/
            ./configure --prefix=/usr
            if [ ! -f "$HOME/ta-lib/src" ]; then make; fi
            sudo make install
            cd
          fi
          if ([ "$RUNNER_OS" = "Windows" ]); then
            curl -sL http://prdownloads.sourceforge.net/ta-lib/ta-lib-0.4.0-msvc.zip -o $GITHUB_WORKSPACE/ta-lib.zip --create-dirs && 7z x $GITHUB_WORKSPACE/ta-lib.zip -o/c/ta-lib && mv /c/ta-lib/ta-lib/* /c/ta-lib/ && rm -rf /c/ta-lib/ta-lib && cd /c/ta-lib/c/make/cdr/win32/msvc && nmake
          fi
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
          pip install numba

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v1
        with:
          languages: ${{ matrix.language }}
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.
          # queries: ./path/to/local/query, your-org/your-repo/queries@main

      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
      # If this step fails, then you should remove it and run the build manually (see below)
      - name: Autobuild
        uses: github/codeql-action/autobuild@v1

      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 https://git.io/JvXDl

      # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
      #    and modify them (or add more) to build your code if your project
      #    uses a compiled language

      #- run: |
      #   make bootstrap
      #   make release

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v1
```
81 changes: .github/workflows/python-package.yml (vendored)
@@ -1,81 +0,0 @@ (entire file removed)
```yaml
# This workflow will install Python dependencies, run tests and lint with a single version of Python
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions

name: Python application

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  build:

    runs-on: ${{matrix.os}}
    strategy:
      matrix:
        # os: [ubuntu-latest, macos-latest, windows-latest]
        os: [ubuntu-latest]
        include:
          - os: ubuntu-latest
            path: ~/.cache/pip
          #- os: macos-latest
          #  path: ~/Library/Caches/pip
          #- os: windows-latest
          #  path: ~\AppData\Local\pip\Cache
        python-version: [3.7, 3.8, 3.9]

    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Cache pip
        uses: actions/cache@v2
        with:
          path: ${{ matrix.path }}
          key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
          restore-keys: |
            ${{ runner.os }}-pip-
      - name: Install ta-lib
        run: |
          if ([ "$RUNNER_OS" = "macOS" ]); then
            brew install ta-lib
          fi
          if ([ "$RUNNER_OS" = "Linux" ]); then
            if [ ! -f "$GITHUB_WORKSPACE/ta-lib/src" ]; then wget http://prdownloads.sourceforge.net/ta-lib/ta-lib-0.4.0-src.tar.gz -q && tar -xzf ta-lib-0.4.0-src.tar.gz; fi
            cd ta-lib/
            ./configure --prefix=/usr
            if [ ! -f "$HOME/ta-lib/src" ]; then make; fi
            sudo make install
            cd
          fi
          if ([ "$RUNNER_OS" = "Windows" ]); then
            curl -sL http://prdownloads.sourceforge.net/ta-lib/ta-lib-0.4.0-msvc.zip -o $GITHUB_WORKSPACE/ta-lib.zip --create-dirs && 7z x $GITHUB_WORKSPACE/ta-lib.zip -o/c/ta-lib && mv /c/ta-lib/ta-lib/* /c/ta-lib/ && rm -rf /c/ta-lib/ta-lib && cd /c/ta-lib/c/make/cdr/win32/msvc && nmake
          fi
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
          pip install numba
      # - name: Lint with flake8
      #   run: |
      #     stop the build if there are Python syntax errors or undefined names
      #     flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
      #     exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
      #     flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
      - name: Install jesse
        run: |
          pip install -e . -U
      - name: Test with pytest
        run: |
          pip install pytest
          pytest
```
3 changes: .gitignore (vendored)
```diff
@@ -147,5 +147,4 @@ cython_debug/
 /storage/*.gz
 /.vagrant
 testing-*.py
-/storage/full-reports/*.html
-/storage/logs/*
+/storage/full-reports/*.html
```
```diff
@@ -2,7 +2,7 @@ FROM python:3.9.4-slim
 ENV PYTHONUNBUFFERED 1
 
 RUN apt-get update \
-    && apt-get -y install git build-essential libssl-dev \
+    && apt-get -y install build-essential libssl-dev \
     && apt-get clean \
     && pip install --upgrade pip
```
239 changes: README.md
@@ -1,8 +1,94 @@
# Jesse beta (GUI dashboard)
# Jesse
[](https://pypi.org/project/jesse)
[](https://pepy.tech/project/jesse)
[](https://hub.docker.com/r/salehmir/jesse)
[](https://github.com/jesse-ai/jesse)

Here's a quick guide on how to set up and run the dashboard branch until it is officially released.
---

[](https://jesse.trade)
[](https://docs.jesse.trade)
[](https://jesse.trade/help)
[](https://jesse.trade/discord)
[](https://jesse.trade/blog)
---
Jesse is an advanced crypto trading framework which aims to simplify researching and defining trading strategies.

## Why Jesse?
In short, Jesse is more accurate than other solutions, and much simpler. In fact, it is so simple that if you already know Python, you can get started today, in a matter of minutes, instead of weeks or months.

[Here](https://docs.jesse.trade/docs/) you can read more about Jesse's features.

## Getting Started
Head over to the "getting started" section of the [documentation](https://docs.jesse.trade/docs/getting-started). The documentation is short yet very informative.

## Example Backtest Results

Check out Jesse's [blog](https://jesse.trade/blog) for tutorials that go through example strategies step by step.

Here's an example output for a backtest simulation just to get you excited:
```
 CANDLES              |
----------------------+--------------------------
 period               | 1792 days (4.91 years)
 starting-ending date | 2016-01-01 => 2020-11-27


 exchange   | symbol   | timeframe   | strategy         | DNA
------------+----------+-------------+------------------+-------
 Bitfinex   | BTC-USD  | 6h          | TrendFollowing05 |


Executing simulation...  [####################################]  100%
Executed backtest simulation in:  135.85 seconds


 METRICS                         |
---------------------------------+------------------------------------
 Total Closed Trades             | 221
 Total Net Profit                | 1,699,245.56 (1699.25%)
 Starting => Finishing Balance   | 100,000 => 1,799,245.56
 Total Open Trades               | 0
 Open PL                         | 0
 Total Paid Fees                 | 331,480.93
 Max Drawdown                    | -22.42%
 Annual Return                   | 80.09%
 Expectancy                      | 7,688.89 (7.69%)
 Avg Win | Avg Loss              | 31,021.9 | 8,951.7
 Ratio Avg Win / Avg Loss        | 3.47
 Percent Profitable              | 42%
 Longs | Shorts                  | 60% | 40%
 Avg Holding Time                | 3.0 days, 22.0 hours, 50.0 minutes
 Winning Trades Avg Holding Time | 6.0 days, 14.0 hours, 9.0 minutes
 Losing Trades Avg Holding Time  | 2.0 days, 1.0 hour, 41.0 minutes
 Sharpe Ratio                    | 1.88
 Calmar Ratio                    | 3.57
 Sortino Ratio                   | 3.51
 Omega Ratio                     | 1.49
 Winning Streak                  | 5
 Losing Streak                   | 10
 Largest Winning Trade           | 205,575.89
 Largest Losing Trade            | -50,827.92
 Total Winning Trades            | 92
 Total Losing Trades             | 129
```
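
As a quick cross-check of the table above, the Expectancy row follows from the other reported numbers: expectancy is the win rate times the average win minus the loss rate times the average loss. The snippet below only re-derives the reported figure from the metrics shown; it is not Jesse's own metrics code.

```python
# Re-derive the "Expectancy" row from the other reported metrics; illustrative only.
total_trades = 221
winning_trades = 92      # "Total Winning Trades"
losing_trades = 129      # "Total Losing Trades"
avg_win = 31_021.9       # "Avg Win"
avg_loss = 8_951.7       # "Avg Loss"

win_rate = winning_trades / total_trades   # ~41.6%, shown rounded as 42%
loss_rate = losing_trades / total_trades

expectancy = win_rate * avg_win - loss_rate * avg_loss
print(round(expectancy, 2))  # 7688.89, matching "Expectancy | 7,688.89 (7.69%)"
```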

And here are generated charts:


## What's next?
This is the very initial release. There's way more to come. Subscribe to our mailing list at [jesse.trade](https://jesse.trade) to get the good stuff as soon as they're released. Don't worry, we won't send you spam. Pinky promise.

## Community
I created a [Discord server](https://jesse.trade/discord) for Jesse users to discuss algo-trading. It's a warm place to share ideas and help each other out.

## How to contribute
Thank you for your interest in contributing to the project. Before starting to work on a PR, please make sure that it isn't under the "in progress" column on our [GitHub project page](https://github.com/jesse-ai/jesse/projects/2). In case you want to help but don't know what tasks we need help with, check out the "todo" column.

First, you need to install Jesse from the repository instead of PyPi:

First, you need to set up Jesse from the source code if you haven't already:
```sh
# first, make sure that the PyPi version is not installed
pip uninstall jesse
```

@@ -13,149 +99,12 @@ cd jesse

```sh
pip install -e .
```

Then you need to switch to the `beta` branch:
```sh
git checkout beta
```

Now every change you make to the code will take effect immediately.

After every change, make sure your changes did not break any functionality by running the tests:
```
pytest
```
Now go to your Jesse project (where you used to run the backtest command, etc.) and first create a `.env` file with the configuration below:

```sh
nano .env
```

```
PASSWORD=test

POSTGRES_HOST=127.0.0.1
POSTGRES_NAME=jesse_db
POSTGRES_PORT=5432
POSTGRES_USERNAME=jesse_user
POSTGRES_PASSWORD=password

REDIS_HOST=localhost
REDIS_PORT=6379
REDIS_PASSWORD=


# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#                                Live Trade Only                                   #
# =============================================================================== #
# Below values don't concern you if you haven't installed the live trade plugin   #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #

# For all notifications
GENERAL_TELEGRAM_BOT_TOKEN=
GENERAL_TELEGRAM_BOT_CHAT_ID=
GENERAL_DISCORD_WEBHOOK=

# For error notifications only
ERROR_TELEGRAM_BOT_TOKEN=
ERROR_TELEGRAM_BOT_CHAT_ID=
ERROR_DISCORD_WEBHOOK=

# Testnet Binance Futures:
# http://testnet.binancefuture.com
TESTNET_BINANCE_FUTURES_API_KEY=
TESTNET_BINANCE_FUTURES_API_SECRET=

# Binance Futures:
# https://www.binance.com/en/futures/btcusdt
BINANCE_FUTURES_API_KEY=
BINANCE_FUTURES_API_SECRET=

# FTX Futures:
# https://ftx.com/markets/future
FTX_FUTURES_API_KEY=
FTX_FUTURES_API_SECRET=
# leave empty if it's the main account and not a subaccount
FTX_FUTURES_SUBACCOUNT_NAME=
```

Of course, you should change the values to match your own configuration if you're not using the defaults. Also, don't forget to change the `PASSWORD`, as you need it for logging in. You no longer need the `routes.py`, `config.py`, or even `live-config.py` files in your Jesse project. You can delete them if you want.
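
As an aside, here is a minimal sketch (not Jesse's own code) of how the values in this `.env` file can be read from Python. It assumes the `python-dotenv` package (`pip install python-dotenv`); in this branch, Jesse itself exposes these values through `jesse.services.env.ENV_VALUES`, as the `jesse run` code later in this diff shows.

```python
# Minimal sketch, assuming python-dotenv is installed; illustrative only.
from dotenv import dotenv_values

env = dotenv_values(".env")  # parses the .env file into a dict of strings

postgres_host = env.get("POSTGRES_HOST", "127.0.0.1")
postgres_port = int(env.get("POSTGRES_PORT", "5432"))
redis_host = env.get("REDIS_HOST", "localhost")
app_password = env.get("PASSWORD")

print(postgres_host, postgres_port, redis_host, app_password is not None)
```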
## New Requirements
First, install Redis, which is a requirement for this application. I will add guides for different environments, but for now you should be able to find guides on the net. On macOS it's as easy as running `brew install redis`. On Ubuntu 20.04:

```sh
sudo apt update -y
sudo apt install redis-server -y
# The supervised directive is set to "no" by default. So let's edit it:
sudo nano /etc/redis/redis.conf
# Find the line that says `supervised no` and change it to `supervised systemd`
sudo systemctl restart redis.service
```

Then you need to install a few pip packages as well. A quick way to install them all is by running:
```sh
pip install -r https://raw.githubusercontent.com/jesse-ai/jesse/beta/requirements.txt
```
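
Before starting the application, you can confirm that Redis is reachable with the values from your `.env` file. The snippet below is only an illustrative check using the `redis` package (`pip install redis`); Jesse's own client lives in `jesse.services.redis`, as seen later in this diff.

```python
# Minimal sketch, assuming the redis package is installed; illustrative only.
import redis

r = redis.Redis(host="localhost", port=6379, password=None)  # values from your .env

try:
    r.ping()  # raises ConnectionError if the server is not reachable
    print("Redis is up and reachable")
except redis.exceptions.ConnectionError as e:
    print(f"Redis is not reachable: {e}")
```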
## Start the application

To get the party started, run the application (inside your Jesse project) with:
```
jesse run
```

It will print a local URL for you to open in your browser, such as:
```
INFO: Started server process [66103]
INFO: Waiting for application startup.
INFO: Application startup complete.
INFO: Uvicorn running on http://0.0.0.0:9000 (Press CTRL+C to quit)
```

So go ahead and open (in my case) `http://127.0.0.1:9000` in your browser of choice. If you are running on a server, you can use the IP address of the server instead of `127.0.0.1`. For example, if the IP address of your server is `1.2.3.4`, the URL would be `http://1.2.3.4:9000`. I will soon add instructions on how to secure the remote server that is running the application.
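
Once the server is up, the dashboard talks to it over the HTTP API defined later in this diff. As an illustration (not an official client), here is how you could obtain an auth token from Python with the `requests` package, using the `PASSWORD` value from your `.env` file; the exact shape of the response body may differ.

```python
# Minimal sketch, assuming the requests package is installed; illustrative only.
import requests

BASE_URL = "http://127.0.0.1:9000"

# The /auth endpoint (shown later in this diff) turns the dashboard PASSWORD
# into a token via authenticator.password_to_token().
resp = requests.post(f"{BASE_URL}/auth", json={"password": "test"})
print(resp.status_code)
print(resp.json())  # expected to contain the issued token
```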
## Live Trade Plugin
To install the beta version of the live trade plugin, first make sure to uninstall the previous one:
```
pip uninstall jesse-live
```

Now you need to mark your account on Jesse.Trade as a beta user. You'll find the option on your [profile](https://jesse.trade/user/profile) page:



Now you can see the latest beta version on the [releases](http://jesse.trade/releases) page. Download and install it as always.
## Security
In case you are running the application on a remote server, you should secure that server. Security is a big topic, but I will mention two methods here that I think are enough.

### 1. Password
Change the password (`PASSWORD`) in your `.env` file. Make sure to set it to something secure.

### 2. Firewall
The dashboard is supposed to be accessible only by you. That makes it easy to secure. The best way is to simply close all incoming ports except for the ones you need, and open them **only for your trusted IP addresses**. This can be done either via a firewall within the server or via the firewall that your cloud provider offers (Hetzner, DigitalOcean, etc.).

I will show you how to do it via ufw, which is a popular firewall that comes with Ubuntu 20.04:

```sh
ufw status
# if it's active, stop it:
systemctl stop ufw
# allow all outgoing traffic
ufw default allow outgoing
# deny all incoming traffic
ufw default deny incoming
# allow ssh port (22)
ufw allow ssh
# If you don't have specific IP addresses, you can open the targeted port
# (9000 by default) for all, but it's best to allow specific IP addresses only.
# Assuming your IP addresses are 1.1.1.1, 1.1.1.2, and 1.1.1.3, run:
ufw allow from 1.1.1.1 to any port 9000 proto tcp
ufw allow from 1.1.1.2 to any port 9000 proto tcp
ufw allow from 1.1.1.3 to any port 9000 proto tcp
# enable the firewall
ufw enable
# check the status
ufw status numbered
# restart ufw to apply the changes
systemctl restart ufw
```
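
Besides the firewall, note that every API endpoint added in this branch (for example `/general-info`, `/backtest`, `/shutdown`) checks the `Authorization` header through `authenticator.is_valid_token()`, so the password from your `.env` file effectively gates the whole API. The snippet below is only an illustrative check using the `requests` package; the exact header format and the unauthorized status code depend on the authenticator's implementation.

```python
# Minimal sketch, assuming the requests package and a token from /auth; illustrative only.
import requests

BASE_URL = "http://127.0.0.1:9000"
token = "..."  # value returned by the /auth endpoint

# Without a token, protected endpoints should return the unauthorized response:
print(requests.post(f"{BASE_URL}/general-info").status_code)

# With a valid token in the Authorization header, the general info payload is returned:
resp = requests.post(f"{BASE_URL}/general-info", headers={"Authorization": token})
print(resp.status_code, resp.json())
```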
## Disclaimer
**This is the beta version of an early-access plugin! That means you should NOT use it in production yet! If you do anyway, you alone are responsible.**
This software is for educational purposes only. USE THE SOFTWARE AT YOUR OWN RISK. THE AUTHORS AND ALL AFFILIATES ASSUME NO RESPONSIBILITY FOR YOUR TRADING RESULTS. Do not risk money which you are afraid to lose. There might be bugs in the code - this software DOES NOT come with ANY warranty.
@@ -1,41 +1,37 @@
|
||||
import asyncio
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
# Hide the "FutureWarning: pandas.util.testing is deprecated." caused by empyrical
|
||||
import warnings
|
||||
from typing import Optional
|
||||
from pydoc import locate
|
||||
|
||||
import click
|
||||
import pkg_resources
|
||||
from fastapi import BackgroundTasks, Query, Header
|
||||
from starlette.websockets import WebSocket, WebSocketDisconnect
|
||||
from fastapi.responses import JSONResponse, FileResponse
|
||||
from fastapi.staticfiles import StaticFiles
|
||||
from jesse.services import auth as authenticator
|
||||
from jesse.services.redis import async_redis, async_publish, sync_publish
|
||||
from jesse.services.web import fastapi_app, BacktestRequestJson, ImportCandlesRequestJson, CancelRequestJson, \
|
||||
LoginRequestJson, ConfigRequestJson, LoginJesseTradeRequestJson, NewStrategyRequestJson, FeedbackRequestJson, \
|
||||
ReportExceptionRequestJson, OptimizationRequestJson
|
||||
import uvicorn
|
||||
from asyncio import Queue
|
||||
import jesse.helpers as jh
|
||||
import time
|
||||
|
||||
# to silent stupid pandas warnings
|
||||
import jesse.helpers as jh
|
||||
|
||||
warnings.simplefilter(action='ignore', category=FutureWarning)
|
||||
|
||||
# Python version validation.
|
||||
if jh.python_version() < 3.7:
|
||||
print(
|
||||
jh.color(
|
||||
f'Jesse requires Python version 3.7 or above. Yours is {jh.python_version()}',
|
||||
'red'
|
||||
)
|
||||
)
|
||||
|
||||
# variable to know if the live trade plugin is installed
|
||||
HAS_LIVE_TRADE_PLUGIN = True
|
||||
try:
|
||||
import jesse_live
|
||||
except ModuleNotFoundError:
|
||||
HAS_LIVE_TRADE_PLUGIN = False
|
||||
# fix directory issue
|
||||
sys.path.insert(0, os.getcwd())
|
||||
|
||||
ls = os.listdir('.')
|
||||
is_jesse_project = 'strategies' in ls and 'config.py' in ls and 'storage' in ls and 'routes.py' in ls
|
||||
|
||||
|
||||
def validate_cwd() -> None:
|
||||
"""
|
||||
make sure we're in a Jesse project
|
||||
"""
|
||||
if not jh.is_jesse_project():
|
||||
if not is_jesse_project:
|
||||
print(
|
||||
jh.color(
|
||||
'Current directory is not a Jesse project. You must run commands from the root of a Jesse project.',
|
||||
@@ -45,130 +41,188 @@ def validate_cwd() -> None:
|
||||
os._exit(1)
|
||||
|
||||
|
||||
# print(os.path.dirname(jesse))
|
||||
JESSE_DIR = os.path.dirname(os.path.realpath(__file__))
|
||||
def inject_local_config() -> None:
|
||||
"""
|
||||
injects config from local config file
|
||||
"""
|
||||
local_config = locate('config.config')
|
||||
from jesse.config import set_config
|
||||
set_config(local_config)
|
||||
|
||||
|
||||
# load homepage
|
||||
@fastapi_app.get("/")
|
||||
async def index():
|
||||
return FileResponse(f"{JESSE_DIR}/static/index.html")
|
||||
def inject_local_routes() -> None:
|
||||
"""
|
||||
injects routes from local routes folder
|
||||
"""
|
||||
local_router = locate('routes')
|
||||
from jesse.routes import router
|
||||
|
||||
router.set_routes(local_router.routes)
|
||||
router.set_extra_candles(local_router.extra_candles)
|
||||
|
||||
|
||||
@fastapi_app.post("/terminate-all")
|
||||
async def terminate_all(authorization: Optional[str] = Header(None)):
|
||||
if not authenticator.is_valid_token(authorization):
|
||||
return authenticator.unauthorized_response()
|
||||
|
||||
from jesse.services.multiprocessing import process_manager
|
||||
|
||||
process_manager.flush()
|
||||
return JSONResponse({'message': 'terminating all tasks...'})
|
||||
# inject local files
|
||||
if is_jesse_project:
|
||||
inject_local_config()
|
||||
inject_local_routes()
|
||||
|
||||
|
||||
@fastapi_app.post("/shutdown")
|
||||
async def shutdown(background_tasks: BackgroundTasks, authorization: Optional[str] = Header(None)):
|
||||
if not authenticator.is_valid_token(authorization):
|
||||
return authenticator.unauthorized_response()
|
||||
def register_custom_exception_handler() -> None:
|
||||
import sys
|
||||
import threading
|
||||
import traceback
|
||||
import logging
|
||||
from jesse.services import logger as jesse_logger
|
||||
import click
|
||||
from jesse import exceptions
|
||||
|
||||
background_tasks.add_task(jh.terminate_app)
|
||||
return JSONResponse({'message': 'Shutting down...'})
|
||||
log_format = "%(message)s"
|
||||
os.makedirs('storage/logs', exist_ok=True)
|
||||
|
||||
if jh.is_livetrading():
|
||||
logging.basicConfig(filename='storage/logs/live-trade.txt', level=logging.INFO,
|
||||
filemode='w', format=log_format)
|
||||
elif jh.is_paper_trading():
|
||||
logging.basicConfig(filename='storage/logs/paper-trade.txt', level=logging.INFO,
|
||||
filemode='w',
|
||||
format=log_format)
|
||||
elif jh.is_collecting_data():
|
||||
logging.basicConfig(filename='storage/logs/collect.txt', level=logging.INFO, filemode='w',
|
||||
format=log_format)
|
||||
elif jh.is_optimizing():
|
||||
logging.basicConfig(filename='storage/logs/optimize.txt', level=logging.INFO, filemode='w',
|
||||
format=log_format)
|
||||
else:
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
@fastapi_app.post("/auth")
|
||||
def auth(json_request: LoginRequestJson):
|
||||
return authenticator.password_to_token(json_request.password)
|
||||
# main thread
|
||||
def handle_exception(exc_type, exc_value, exc_traceback) -> None:
|
||||
if issubclass(exc_type, KeyboardInterrupt):
|
||||
sys.excepthook(exc_type, exc_value, exc_traceback)
|
||||
return
|
||||
|
||||
# handle Breaking exceptions
|
||||
if exc_type in [
|
||||
exceptions.InvalidConfig, exceptions.RouteNotFound, exceptions.InvalidRoutes,
|
||||
exceptions.CandleNotFoundInDatabase
|
||||
]:
|
||||
click.clear()
|
||||
print(f"{'=' * 30} EXCEPTION TRACEBACK:")
|
||||
traceback.print_tb(exc_traceback, file=sys.stdout)
|
||||
print("=" * 73)
|
||||
print(
|
||||
'\n',
|
||||
jh.color('Uncaught Exception:', 'red'),
|
||||
jh.color(f'{exc_type.__name__}: {exc_value}', 'yellow')
|
||||
)
|
||||
return
|
||||
|
||||
@fastapi_app.post("/make-strategy")
|
||||
def make_strategy(json_request: NewStrategyRequestJson, authorization: Optional[str] = Header(None)) -> JSONResponse:
|
||||
if not authenticator.is_valid_token(authorization):
|
||||
return authenticator.unauthorized_response()
|
||||
|
||||
from jesse.services import strategy_maker
|
||||
return strategy_maker.generate(json_request.name)
|
||||
|
||||
|
||||
@fastapi_app.post("/feedback")
|
||||
def feedback(json_request: FeedbackRequestJson, authorization: Optional[str] = Header(None)) -> JSONResponse:
|
||||
if not authenticator.is_valid_token(authorization):
|
||||
return authenticator.unauthorized_response()
|
||||
|
||||
from jesse.services import jesse_trade
|
||||
return jesse_trade.feedback(json_request.description, json_request.email)
|
||||
|
||||
|
||||
@fastapi_app.post("/report-exception")
|
||||
def report_exception(json_request: ReportExceptionRequestJson, authorization: Optional[str] = Header(None)) -> JSONResponse:
|
||||
if not authenticator.is_valid_token(authorization):
|
||||
return authenticator.unauthorized_response()
|
||||
|
||||
from jesse.services import jesse_trade
|
||||
return jesse_trade.report_exception(
|
||||
json_request.description, json_request.traceback, json_request.mode, json_request.attach_logs, json_request.session_id, json_request.email
|
||||
)
|
||||
|
||||
|
||||
@fastapi_app.post("/get-config")
|
||||
def get_config(json_request: ConfigRequestJson, authorization: Optional[str] = Header(None)):
|
||||
if not authenticator.is_valid_token(authorization):
|
||||
return authenticator.unauthorized_response()
|
||||
|
||||
from jesse.modes.data_provider import get_config as gc
|
||||
|
||||
return JSONResponse({
|
||||
'data': gc(json_request.current_config, has_live=HAS_LIVE_TRADE_PLUGIN)
|
||||
}, status_code=200)
|
||||
|
||||
|
||||
@fastapi_app.post("/update-config")
|
||||
def update_config(json_request: ConfigRequestJson, authorization: Optional[str] = Header(None)):
|
||||
if not authenticator.is_valid_token(authorization):
|
||||
return authenticator.unauthorized_response()
|
||||
|
||||
from jesse.modes.data_provider import update_config as uc
|
||||
|
||||
uc(json_request.current_config)
|
||||
|
||||
return JSONResponse({'message': 'Updated configurations successfully'}, status_code=200)
|
||||
|
||||
|
||||
@fastapi_app.websocket("/ws")
|
||||
async def websocket_endpoint(websocket: WebSocket, token: str = Query(...)):
|
||||
from jesse.services.multiprocessing import process_manager
|
||||
|
||||
if not authenticator.is_valid_token(token):
|
||||
return
|
||||
|
||||
await websocket.accept()
|
||||
|
||||
queue = Queue()
|
||||
ch, = await async_redis.psubscribe('channel:*')
|
||||
|
||||
async def echo(q):
|
||||
while True:
|
||||
msg = await q.get()
|
||||
msg = json.loads(msg)
|
||||
msg['id'] = process_manager.get_client_id(msg['id'])
|
||||
await websocket.send_json(
|
||||
msg
|
||||
# send notifications if it's a live session
|
||||
if jh.is_live():
|
||||
jesse_logger.error(
|
||||
f'{exc_type.__name__}: {exc_value}'
|
||||
)
|
||||
|
||||
async def reader(channel, q):
|
||||
async for ch, message in channel.iter():
|
||||
# modify id and set the one that the front-end knows
|
||||
await q.put(message)
|
||||
if jh.is_live() or jh.is_collecting_data():
|
||||
logging.error("Uncaught Exception:", exc_info=(exc_type, exc_value, exc_traceback))
|
||||
else:
|
||||
print(f"{'=' * 30} EXCEPTION TRACEBACK:")
|
||||
traceback.print_tb(exc_traceback, file=sys.stdout)
|
||||
print("=" * 73)
|
||||
print(
|
||||
'\n',
|
||||
jh.color('Uncaught Exception:', 'red'),
|
||||
jh.color(f'{exc_type.__name__}: {exc_value}', 'yellow')
|
||||
)
|
||||
|
||||
asyncio.get_running_loop().create_task(reader(ch, queue))
|
||||
asyncio.get_running_loop().create_task(echo(queue))
|
||||
if jh.is_paper_trading():
|
||||
print(
|
||||
jh.color(
|
||||
'An uncaught exception was raised. Check the log file at:\nstorage/logs/paper-trade.txt',
|
||||
'red'
|
||||
)
|
||||
)
|
||||
elif jh.is_livetrading():
|
||||
print(
|
||||
jh.color(
|
||||
'An uncaught exception was raised. Check the log file at:\nstorage/logs/live-trade.txt',
|
||||
'red'
|
||||
)
|
||||
)
|
||||
elif jh.is_collecting_data():
|
||||
print(
|
||||
jh.color(
|
||||
'An uncaught exception was raised. Check the log file at:\nstorage/logs/collect.txt',
|
||||
'red'
|
||||
)
|
||||
)
|
||||
|
||||
try:
|
||||
while True:
|
||||
# just so WebSocketDisconnect would be raised on connection close
|
||||
await websocket.receive_text()
|
||||
except WebSocketDisconnect:
|
||||
await async_redis.punsubscribe('channel:*')
|
||||
print('Websocket disconnected')
|
||||
sys.excepthook = handle_exception
|
||||
|
||||
# other threads
|
||||
if jh.python_version() >= 3.8:
|
||||
def handle_thread_exception(args) -> None:
|
||||
if args.exc_type == SystemExit:
|
||||
return
|
||||
|
||||
# handle Breaking exceptions
|
||||
if args.exc_type in [
|
||||
exceptions.InvalidConfig, exceptions.RouteNotFound, exceptions.InvalidRoutes,
|
||||
exceptions.CandleNotFoundInDatabase
|
||||
]:
|
||||
click.clear()
|
||||
print(f"{'=' * 30} EXCEPTION TRACEBACK:")
|
||||
traceback.print_tb(args.exc_traceback, file=sys.stdout)
|
||||
print("=" * 73)
|
||||
print(
|
||||
'\n',
|
||||
jh.color('Uncaught Exception:', 'red'),
|
||||
jh.color(f'{args.exc_type.__name__}: {args.exc_value}', 'yellow')
|
||||
)
|
||||
return
|
||||
|
||||
# send notifications if it's a live session
|
||||
if jh.is_live():
|
||||
jesse_logger.error(
|
||||
f'{args.exc_type.__name__}: { args.exc_value}'
|
||||
)
|
||||
|
||||
if jh.is_live() or jh.is_collecting_data():
|
||||
logging.error("Uncaught Exception:",
|
||||
exc_info=(args.exc_type, args.exc_value, args.exc_traceback))
|
||||
else:
|
||||
print(f"{'=' * 30} EXCEPTION TRACEBACK:")
|
||||
traceback.print_tb(args.exc_traceback, file=sys.stdout)
|
||||
print("=" * 73)
|
||||
print(
|
||||
'\n',
|
||||
jh.color('Uncaught Exception:', 'red'),
|
||||
jh.color(f'{args.exc_type.__name__}: {args.exc_value}', 'yellow')
|
||||
)
|
||||
|
||||
if jh.is_paper_trading():
|
||||
print(
|
||||
jh.color(
|
||||
'An uncaught exception was raised. Check the log file at:\nstorage/logs/paper-trade.txt',
|
||||
'red'
|
||||
)
|
||||
)
|
||||
elif jh.is_livetrading():
|
||||
print(
|
||||
jh.color(
|
||||
'An uncaught exception was raised. Check the log file at:\nstorage/logs/live-trade.txt',
|
||||
'red'
|
||||
)
|
||||
)
|
||||
elif jh.is_collecting_data():
|
||||
print(
|
||||
jh.color(
|
||||
'An uncaught exception was raised. Check the log file at:\nstorage/logs/collect.txt',
|
||||
'red'
|
||||
)
|
||||
)
|
||||
|
||||
threading.excepthook = handle_thread_exception
|
||||
|
||||
|
||||
# create a Click group
|
||||
@@ -179,294 +233,275 @@ def cli() -> None:
|
||||
|
||||
|
||||
@cli.command()
|
||||
@click.option(
|
||||
'--strict/--no-strict', default=True,
|
||||
help='Default is the strict mode, which will raise an exception if the license value is not set.'
|
||||
)
|
||||
def install_live(strict: bool) -> None:
|
||||
from jesse.services.installer import install
|
||||
install(HAS_LIVE_TRADE_PLUGIN, strict)
|
||||
|
||||
|
||||
@cli.command()
|
||||
def run() -> None:
|
||||
@click.argument('exchange', required=True, type=str)
|
||||
@click.argument('symbol', required=True, type=str)
|
||||
@click.argument('start_date', required=True, type=str)
|
||||
@click.option('--skip-confirmation', is_flag=True,
|
||||
help="Will prevent confirmation for skipping duplicates")
|
||||
def import_candles(exchange: str, symbol: str, start_date: str, skip_confirmation: bool) -> None:
|
||||
"""
|
||||
imports historical candles from exchange
|
||||
"""
|
||||
validate_cwd()
|
||||
from jesse.config import config
|
||||
config['app']['trading_mode'] = 'import-candles'
|
||||
|
||||
# run all the db migrations
|
||||
from jesse.services.migrator import run as run_migrations
|
||||
import peewee
|
||||
try:
|
||||
run_migrations()
|
||||
except peewee.OperationalError:
|
||||
sleep_seconds = 10
|
||||
print(f"Database wasn't ready. Sleep for {sleep_seconds} seconds and try again.")
|
||||
time.sleep(sleep_seconds)
|
||||
run_migrations()
|
||||
register_custom_exception_handler()
|
||||
|
||||
# read port from .env file, if not found, use default
|
||||
from jesse.services.env import ENV_VALUES
|
||||
if 'APP_PORT' in ENV_VALUES:
|
||||
port = int(ENV_VALUES['APP_PORT'])
|
||||
else:
|
||||
port = 9000
|
||||
|
||||
# run the main application
|
||||
uvicorn.run(fastapi_app, host="0.0.0.0", port=port, log_level="info")
|
||||
|
||||
|
||||
@fastapi_app.post('/general-info')
|
||||
def general_info(authorization: Optional[str] = Header(None)) -> JSONResponse:
|
||||
if not authenticator.is_valid_token(authorization):
|
||||
return authenticator.unauthorized_response()
|
||||
|
||||
from jesse.modes import data_provider
|
||||
|
||||
try:
|
||||
data = data_provider.get_general_info(has_live=HAS_LIVE_TRADE_PLUGIN)
|
||||
except Exception as e:
|
||||
return JSONResponse({
|
||||
'error': str(e)
|
||||
}, status_code=500)
|
||||
|
||||
return JSONResponse(
|
||||
data,
|
||||
status_code=200
|
||||
)
|
||||
|
||||
|
||||
@fastapi_app.post('/import-candles')
|
||||
def import_candles(request_json: ImportCandlesRequestJson, authorization: Optional[str] = Header(None)) -> JSONResponse:
|
||||
from jesse.services.multiprocessing import process_manager
|
||||
|
||||
validate_cwd()
|
||||
|
||||
if not authenticator.is_valid_token(authorization):
|
||||
return authenticator.unauthorized_response()
|
||||
from jesse.services import db
|
||||
|
||||
from jesse.modes import import_candles_mode
|
||||
|
||||
process_manager.add_task(
|
||||
import_candles_mode.run, 'candles-' + str(request_json.id), request_json.exchange, request_json.symbol,
|
||||
request_json.start_date, True
|
||||
)
|
||||
import_candles_mode.run(exchange, symbol, start_date, skip_confirmation)
|
||||
|
||||
return JSONResponse({'message': 'Started importing candles...'}, status_code=202)
|
||||
db.close_connection()
|
||||
|
||||
|
||||
@fastapi_app.delete("/import-candles")
|
||||
def cancel_import_candles(request_json: CancelRequestJson, authorization: Optional[str] = Header(None)):
|
||||
from jesse.services.multiprocessing import process_manager
|
||||
|
||||
if not authenticator.is_valid_token(authorization):
|
||||
return authenticator.unauthorized_response()
|
||||
|
||||
process_manager.cancel_process('candles-' + request_json.id)
|
||||
|
||||
return JSONResponse({'message': f'Candles process with ID of {request_json.id} was requested for termination'}, status_code=202)
|
||||
|
||||
|
||||
@fastapi_app.post("/backtest")
|
||||
def backtest(request_json: BacktestRequestJson, authorization: Optional[str] = Header(None)):
|
||||
from jesse.services.multiprocessing import process_manager
|
||||
|
||||
if not authenticator.is_valid_token(authorization):
|
||||
return authenticator.unauthorized_response()
|
||||
|
||||
@cli.command()
|
||||
@click.argument('start_date', required=True, type=str)
|
||||
@click.argument('finish_date', required=True, type=str)
|
||||
@click.option('--debug/--no-debug', default=False,
|
||||
help='Displays logging messages instead of the progressbar. Used for debugging your strategy.')
|
||||
@click.option('--csv/--no-csv', default=False,
|
||||
help='Outputs a CSV file of all executed trades on completion.')
|
||||
@click.option('--json/--no-json', default=False,
|
||||
help='Outputs a JSON file of all executed trades on completion.')
|
||||
@click.option('--fee/--no-fee', default=True,
|
||||
help='You can use "--no-fee" as a quick way to set trading fee to zero.')
|
||||
@click.option('--chart/--no-chart', default=False,
|
||||
help='Generates charts of daily portfolio balance and assets price change. Useful for a visual comparison of your portfolio against the market.')
|
||||
@click.option('--tradingview/--no-tradingview', default=False,
|
||||
help="Generates an output that can be copy-and-pasted into tradingview.com's pine-editor too see the trades in their charts.")
|
||||
@click.option('--full-reports/--no-full-reports', default=False,
|
||||
help="Generates QuantStats' HTML output with metrics reports like Sharpe ratio, Win rate, Volatility, etc., and batch plotting for visualizing performance, drawdowns, rolling statistics, monthly returns, etc.")
|
||||
def backtest(start_date: str, finish_date: str, debug: bool, csv: bool, json: bool, fee: bool, chart: bool,
|
||||
tradingview: bool, full_reports: bool) -> None:
|
||||
"""
|
||||
backtest mode. Enter in "YYYY-MM-DD" "YYYY-MM-DD"
|
||||
"""
|
||||
validate_cwd()
|
||||
|
||||
from jesse.modes.backtest_mode import run as run_backtest
|
||||
from jesse.config import config
|
||||
config['app']['trading_mode'] = 'backtest'
|
||||
|
||||
process_manager.add_task(
|
||||
run_backtest,
|
||||
'backtest-' + str(request_json.id),
|
||||
request_json.debug_mode,
|
||||
request_json.config,
|
||||
request_json.routes,
|
||||
request_json.extra_routes,
|
||||
request_json.start_date,
|
||||
request_json.finish_date,
|
||||
None,
|
||||
request_json.export_chart,
|
||||
request_json.export_tradingview,
|
||||
request_json.export_full_reports,
|
||||
request_json.export_csv,
|
||||
request_json.export_json
|
||||
)
|
||||
register_custom_exception_handler()
|
||||
|
||||
return JSONResponse({'message': 'Started backtesting...'}, status_code=202)
|
||||
from jesse.services import db
|
||||
from jesse.modes import backtest_mode
|
||||
from jesse.services.selectors import get_exchange
|
||||
|
||||
# debug flag
|
||||
config['app']['debug_mode'] = debug
|
||||
|
||||
# fee flag
|
||||
if not fee:
|
||||
for e in config['app']['trading_exchanges']:
|
||||
config['env']['exchanges'][e]['fee'] = 0
|
||||
get_exchange(e).fee = 0
|
||||
|
||||
backtest_mode.run(start_date, finish_date, chart=chart, tradingview=tradingview, csv=csv,
|
||||
json=json, full_reports=full_reports)
|
||||
|
||||
db.close_connection()
|
||||
|
||||
|
||||
@fastapi_app.post("/optimization")
|
||||
async def optimization(request_json: OptimizationRequestJson, authorization: Optional[str] = Header(None)):
|
||||
if not authenticator.is_valid_token(authorization):
|
||||
return authenticator.unauthorized_response()
|
||||
|
||||
from jesse.services.multiprocessing import process_manager
|
||||
|
||||
@cli.command()
|
||||
@click.argument('start_date', required=True, type=str)
|
||||
@click.argument('finish_date', required=True, type=str)
|
||||
@click.argument('optimal_total', required=True, type=int)
|
||||
@click.option(
|
||||
'--cpu', default=0, show_default=True,
|
||||
help='The number of CPU cores that Jesse is allowed to use. If set to 0, it will use as many as is available on your machine.')
|
||||
@click.option(
|
||||
'--debug/--no-debug', default=False,
|
||||
help='Displays detailed logs about the genetics algorithm. Use it if you are interested in the genetics algorithm.'
|
||||
)
|
||||
@click.option('--csv/--no-csv', default=False, help='Outputs a CSV file of all DNAs on completion.')
|
||||
@click.option('--json/--no-json', default=False, help='Outputs a JSON file of all DNAs on completion.')
|
||||
def optimize(start_date: str, finish_date: str, optimal_total: int, cpu: int, debug: bool, csv: bool,
|
||||
json: bool) -> None:
|
||||
"""
|
||||
tunes the hyper-parameters of your strategy
|
||||
"""
|
||||
validate_cwd()
|
||||
from jesse.config import config
|
||||
config['app']['trading_mode'] = 'optimize'
|
||||
|
||||
from jesse.modes.optimize_mode import run as run_optimization
|
||||
register_custom_exception_handler()
|
||||
|
||||
process_manager.add_task(
|
||||
run_optimization,
|
||||
'optimize-' + str(request_json.id),
|
||||
request_json.debug_mode,
|
||||
request_json.config,
|
||||
request_json.routes,
|
||||
request_json.extra_routes,
|
||||
request_json.start_date,
|
||||
request_json.finish_date,
|
||||
request_json.optimal_total,
|
||||
request_json.export_csv,
|
||||
request_json.export_json
|
||||
)
|
||||
# debug flag
|
||||
config['app']['debug_mode'] = debug
|
||||
|
||||
# optimize_mode(start_date, finish_date, optimal_total, cpu, csv, json)
|
||||
from jesse.modes.optimize_mode import optimize_mode
|
||||
|
||||
return JSONResponse({'message': 'Started optimization...'}, status_code=202)
|
||||
optimize_mode(start_date, finish_date, optimal_total, cpu, csv, json)
|
||||
|
||||
|
||||
@fastapi_app.delete("/optimization")
|
||||
def cancel_optimization(request_json: CancelRequestJson, authorization: Optional[str] = Header(None)):
|
||||
if not authenticator.is_valid_token(authorization):
|
||||
return authenticator.unauthorized_response()
|
||||
@cli.command()
|
||||
@click.argument('name', required=True, type=str)
|
||||
def make_strategy(name: str) -> None:
|
||||
"""
|
||||
generates a new strategy folder from jesse/strategies/ExampleStrategy
|
||||
"""
|
||||
validate_cwd()
|
||||
from jesse.config import config
|
||||
|
||||
from jesse.services.multiprocessing import process_manager
|
||||
config['app']['trading_mode'] = 'make-strategy'
|
||||
|
||||
process_manager.cancel_process('optimize-' + request_json.id)
|
||||
register_custom_exception_handler()
|
||||
|
||||
return JSONResponse({'message': f'Optimization process with ID of {request_json.id} was requested for termination'}, status_code=202)
|
||||
from jesse.services import strategy_maker
|
||||
|
||||
strategy_maker.generate(name)
|
||||
|
||||
|
||||
@fastapi_app.get("/download/{mode}/{file_type}/{session_id}")
|
||||
def download(mode: str, file_type: str, session_id: str, token: str = Query(...)):
|
||||
if not authenticator.is_valid_token(token):
|
||||
return authenticator.unauthorized_response()
|
||||
@cli.command()
|
||||
@click.argument('name', required=True, type=str)
|
||||
def make_project(name: str) -> None:
|
||||
"""
|
||||
generates a new Jesse project
|
||||
"""
|
||||
from jesse.config import config
|
||||
|
||||
from jesse.modes import data_provider
|
||||
config['app']['trading_mode'] = 'make-project'
|
||||
|
||||
return data_provider.download_file(mode, file_type, session_id)
|
||||
register_custom_exception_handler()
|
||||
|
||||
from jesse.services import project_maker
|
||||
|
||||
project_maker.generate(name)
|
||||
|
||||
|
||||
@fastapi_app.delete("/backtest")
|
||||
def cancel_backtest(request_json: CancelRequestJson, authorization: Optional[str] = Header(None)):
|
||||
if not authenticator.is_valid_token(authorization):
|
||||
return authenticator.unauthorized_response()
|
||||
@cli.command()
|
||||
@click.option('--dna/--no-dna', default=False,
|
||||
help='Translates DNA into parameters. Used in optimize mode only')
|
||||
def routes(dna: bool) -> None:
|
||||
"""
|
||||
lists all routes
|
||||
"""
|
||||
validate_cwd()
|
||||
from jesse.config import config
|
||||
|
||||
from jesse.services.multiprocessing import process_manager
|
||||
config['app']['trading_mode'] = 'routes'
|
||||
|
||||
process_manager.cancel_process('backtest-' + request_json.id)
|
||||
register_custom_exception_handler()
|
||||
|
||||
return JSONResponse({'message': f'Backtest process with ID of {request_json.id} was requested for termination'}, status_code=202)
|
||||
from jesse.modes import routes_mode
|
||||
|
||||
routes_mode.run(dna)
|
||||
|
||||
|
||||
@fastapi_app.on_event("shutdown")
|
||||
def shutdown_event():
|
||||
from jesse.services.db import database
|
||||
database.close_connection()
|
||||
live_package_exists = True
|
||||
try:
|
||||
import jesse_live
|
||||
except ModuleNotFoundError:
|
||||
live_package_exists = False
|
||||
if live_package_exists:
|
||||
# @cli.command()
|
||||
# def collect() -> None:
|
||||
# """
|
||||
# fetches streamed market data such as tickers, trades, and orderbook from
|
||||
# the WS connection and stores them into the database for later research.
|
||||
# """
|
||||
# validate_cwd()
|
||||
#
|
||||
# # set trading mode
|
||||
# from jesse.config import config
|
||||
# config['app']['trading_mode'] = 'collect'
|
||||
#
|
||||
# register_custom_exception_handler()
|
||||
#
|
||||
# from jesse_live.live.collect_mode import run
|
||||
#
|
||||
# run()
|
||||
|
||||
|
||||
if HAS_LIVE_TRADE_PLUGIN:
|
||||
from jesse.services.web import fastapi_app, LiveRequestJson, LiveCancelRequestJson, GetCandlesRequestJson, \
|
||||
GetLogsRequestJson, GetOrdersRequestJson
|
||||
from jesse.services import auth as authenticator
|
||||
|
||||
@fastapi_app.post("/live")
|
||||
def live(request_json: LiveRequestJson, authorization: Optional[str] = Header(None)) -> JSONResponse:
|
||||
if not authenticator.is_valid_token(authorization):
|
||||
return authenticator.unauthorized_response()
|
||||
|
||||
from jesse import validate_cwd
|
||||
|
||||
# dev_mode is used only by developers so it doesn't have to be a supported parameter
|
||||
dev_mode: bool = False
|
||||
|
||||
@cli.command()
|
||||
@click.option('--testdrive/--no-testdrive', default=False)
|
||||
@click.option('--debug/--no-debug', default=False)
|
||||
@click.option('--dev/--no-dev', default=False)
|
||||
def live(testdrive: bool, debug: bool, dev: bool) -> None:
|
||||
"""
|
||||
trades in real-time on exchange with REAL money
|
||||
"""
|
||||
validate_cwd()
|
||||
|
||||
# set trading mode
|
||||
from jesse.config import config
|
||||
config['app']['trading_mode'] = 'livetrade'
|
||||
config['app']['is_test_driving'] = testdrive
|
||||
|
||||
register_custom_exception_handler()
|
||||
|
||||
# debug flag
|
||||
config['app']['debug_mode'] = debug
|
||||
|
||||
from jesse_live import init
|
||||
from jesse.services.selectors import get_exchange
|
||||
live_config = locate('live-config.config')
|
||||
|
||||
# validate that the "live-config.py" file exists
|
||||
if live_config is None:
|
||||
jh.error('You\'re either missing the live-config.py file or haven\'t logged in. Run "jesse login" to fix it.', True)
|
||||
jh.terminate_app()
|
||||
|
||||
# inject live config
|
||||
init(config, live_config)
|
||||
|
||||
# execute live session
|
||||
from jesse_live import live_mode
|
||||
from jesse.services.multiprocessing import process_manager
|
||||
|
||||
trading_mode = 'livetrade' if request_json.paper_mode is False else 'papertrade'
|
||||
|
||||
process_manager.add_task(
|
||||
live_mode.run,
|
||||
f'{trading_mode}-' + str(request_json.id),
|
||||
request_json.debug_mode,
|
||||
dev_mode,
|
||||
request_json.config,
|
||||
request_json.routes,
|
||||
request_json.extra_routes,
|
||||
trading_mode,
|
||||
)
|
||||
|
||||
mode = 'live' if request_json.paper_mode is False else 'paper'
|
||||
|
||||
return JSONResponse({'message': f"Started {mode} trading..."}, status_code=202)
|
||||
from jesse_live.live_mode import run
|
||||
run(dev)
|
||||
|
||||
|
||||
@fastapi_app.delete("/live")
|
||||
def cancel_backtest(request_json: LiveCancelRequestJson, authorization: Optional[str] = Header(None)):
|
||||
if not authenticator.is_valid_token(authorization):
|
||||
return authenticator.unauthorized_response()
|
||||
|
||||
from jesse.services.multiprocessing import process_manager
|
||||
|
||||
trading_mode = 'livetrade' if request_json.paper_mode is False else 'papertrade'
|
||||
|
||||
process_manager.cancel_process(f'{trading_mode}-' + request_json.id)
|
||||
|
||||
return JSONResponse({'message': f'Live process with ID of {request_json.id} terminated.'}, status_code=200)
|
||||
|
||||
|
||||
@fastapi_app.post('/get-candles')
|
||||
def get_candles(json_request: GetCandlesRequestJson, authorization: Optional[str] = Header(None)) -> JSONResponse:
|
||||
if not authenticator.is_valid_token(authorization):
|
||||
return authenticator.unauthorized_response()
|
||||
|
||||
from jesse import validate_cwd
|
||||
|
||||
@cli.command()
|
||||
@click.option('--debug/--no-debug', default=False)
|
||||
@click.option('--dev/--no-dev', default=False)
|
||||
def paper(debug: bool, dev: bool) -> None:
|
||||
"""
|
||||
trades in real-time on exchange with PAPER money
|
||||
"""
|
||||
validate_cwd()
|
||||
|
||||
from jesse.modes.data_provider import get_candles as gc
|
||||
# set trading mode
|
||||
from jesse.config import config
|
||||
config['app']['trading_mode'] = 'papertrade'
|
||||
|
||||
arr = gc(json_request.exchange, json_request.symbol, json_request.timeframe)
|
||||
register_custom_exception_handler()
|
||||
|
||||
return JSONResponse({
|
||||
'id': json_request.id,
|
||||
'data': arr
|
||||
}, status_code=200)
|
||||
# debug flag
|
||||
config['app']['debug_mode'] = debug
|
||||
|
||||
from jesse_live import init
|
||||
from jesse.services.selectors import get_exchange
|
||||
live_config = locate('live-config.config')
|
||||
|
||||
@fastapi_app.post('/get-logs')
|
||||
def get_logs(json_request: GetLogsRequestJson, authorization: Optional[str] = Header(None)) -> JSONResponse:
|
||||
if not authenticator.is_valid_token(authorization):
|
||||
return authenticator.unauthorized_response()
|
||||
# validate that the "live-config.py" file exists
|
||||
if live_config is None:
|
||||
jh.error('You\'re either missing the live-config.py file or haven\'t logged in. Run "jesse login" to fix it.', True)
|
||||
jh.terminate_app()
|
||||
|
||||
from jesse_live.services.data_provider import get_logs as gl
|
||||
# inject live config
|
||||
init(config, live_config)
|
||||
|
||||
arr = gl(json_request.session_id, json_request.type)
|
||||
# execute live session
|
||||
from jesse_live.live_mode import run
|
||||
run(dev)
|
||||
|
||||
return JSONResponse({
|
||||
'id': json_request.id,
|
||||
'data': arr
|
||||
}, status_code=200)
|
||||
@cli.command()
|
||||
@click.option('--email', prompt='Email')
|
||||
@click.option('--password', prompt='Password', hide_input=True)
|
||||
def login(email, password) -> None:
|
||||
"""
|
||||
(Initially) logs in to the website.
|
||||
"""
|
||||
validate_cwd()
|
||||
|
||||
# set trading mode
|
||||
from jesse.config import config
|
||||
config['app']['trading_mode'] = 'login'
|
||||
|
||||
@fastapi_app.post('/get-orders')
|
||||
def get_orders(json_request: GetOrdersRequestJson, authorization: Optional[str] = Header(None)) -> JSONResponse:
|
||||
if not authenticator.is_valid_token(authorization):
|
||||
return authenticator.unauthorized_response()
|
||||
register_custom_exception_handler()
|
||||
|
||||
from jesse_live.services.data_provider import get_orders as go
|
||||
from jesse_live.auth import login
|
||||
|
||||
arr = go(json_request.session_id)
|
||||
|
||||
return JSONResponse({
|
||||
'id': json_request.id,
|
||||
'data': arr
|
||||
}, status_code=200)
|
||||
|
||||
|
||||
# Mount static files. Must be mounted last to prevent overlapping with API endpoints
|
||||
fastapi_app.mount("/", StaticFiles(directory=f"{JESSE_DIR}/static"), name="static")
|
||||
login(email, password)
|
||||
|
||||
159 changes: jesse/config.py
@@ -1,9 +1,14 @@
|
||||
import jesse.helpers as jh
|
||||
|
||||
|
||||
config = {
|
||||
# these values are related to the user's environment
|
||||
'env': {
|
||||
'databases': {
|
||||
'postgres_host': '127.0.0.1',
|
||||
'postgres_name': 'jesse_db',
|
||||
'postgres_port': 5432,
|
||||
'postgres_username': 'jesse_user',
|
||||
'postgres_password': 'password',
|
||||
},
|
||||
|
||||
'caching': {
|
||||
'driver': 'pickle'
|
||||
},
|
||||
@@ -37,64 +42,6 @@ config = {
|
||||
],
|
||||
},
|
||||
|
||||
'Bybit Perpetual': {
|
||||
'fee': 0.00075,
|
||||
|
||||
# backtest mode only: accepted are 'spot' and 'futures'
|
||||
# 'spot' support is currently very limited - you can use 'futures' with leverage 1 for now
|
||||
'type': 'futures',
|
||||
|
||||
# futures mode only
|
||||
'settlement_currency': 'USDT',
|
||||
# accepted values are: 'cross' and 'isolated'
|
||||
'futures_leverage_mode': 'cross',
|
||||
# 1x, 2x, 10x, 50x, etc. Enter as integers
|
||||
'futures_leverage': 1,
|
||||
|
||||
'assets': [
|
||||
{'asset': 'USDT', 'balance': 10_000},
|
||||
],
|
||||
},
|
||||
|
||||
'Testnet Bybit Perpetual': {
|
||||
'fee': 0.00075,
|
||||
|
||||
# backtest mode only: accepted are 'spot' and 'futures'
|
||||
# 'spot' support is currently very limited - you can use 'futures' with leverage 1 for now
|
||||
'type': 'futures',
|
||||
|
||||
# futures mode only
|
||||
'settlement_currency': 'USDT',
|
||||
# accepted values are: 'cross' and 'isolated'
|
||||
'futures_leverage_mode': 'cross',
|
||||
# 1x, 2x, 10x, 50x, etc. Enter as integers
|
||||
'futures_leverage': 1,
|
||||
|
||||
'assets': [
|
||||
{'asset': 'USDT', 'balance': 10_000},
|
||||
],
|
||||
},
|
||||
|
||||
# https://ftx.com/markets/future
|
||||
'FTX Futures': {
|
||||
'fee': 0.0006,
|
||||
|
||||
# backtest mode only: accepted are 'spot' and 'futures'
|
||||
# 'spot' support is currently very limited - you can use 'futures' with leverage 1 for now
|
||||
'type': 'futures',
|
||||
|
||||
# futures mode only
|
||||
'settlement_currency': 'USD',
|
||||
# accepted values are: 'cross' and 'isolated'
|
||||
'futures_leverage_mode': 'cross',
|
||||
# 1x, 2x, 10x, 20x, etc. Enter as integers
|
||||
'futures_leverage': 1,
|
||||
|
||||
'assets': [
|
||||
{'asset': 'USD', 'balance': 10_000},
|
||||
],
|
||||
},
|
||||
|
||||
# https://www.bitfinex.com
|
||||
'Bitfinex': {
|
||||
'fee': 0.002,
|
||||
@@ -196,6 +143,20 @@ config = {
|
||||
},
|
||||
},
|
||||
|
||||
# changes the metrics output of the backtest
|
||||
'metrics': {
|
||||
'sharpe_ratio': True,
|
||||
'calmar_ratio': False,
|
||||
'sortino_ratio': False,
|
||||
'omega_ratio': False,
|
||||
'winning_streak': False,
|
||||
'losing_streak': False,
|
||||
'largest_losing_trade': False,
|
||||
'largest_winning_trade': False,
|
||||
'total_winning_trades': False,
|
||||
'total_losing_trades': False,
|
||||
},
|
||||
|
||||
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
|
||||
# Optimize mode
|
||||
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
|
||||
@@ -203,7 +164,7 @@ config = {
|
||||
# Below configurations are related to the optimize mode
|
||||
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
|
||||
'optimization': {
|
||||
# sharpe, calmar, sortino, omega, serenity, smart sharpe, smart sortino
|
||||
# sharpe, calmar, sortino, omega
|
||||
'ratio': 'sharpe',
|
||||
},
|
||||
|
||||
@@ -255,67 +216,27 @@ config = {
|
||||
},
|
||||
}
|
||||
|
||||
backup_config = config.copy()
|
||||
|
||||
def set_config(conf: dict) -> None:
|
||||
|
||||
def set_config(c) -> None:
|
||||
global config
|
||||
|
||||
# optimization mode only
|
||||
if jh.is_optimizing():
|
||||
# ratio
|
||||
config['env']['optimization']['ratio'] = conf['ratio']
|
||||
# exchange info (only one because the optimize mode supports only one trading route at the moment)
|
||||
config['env']['optimization']['exchange'] = conf['exchange']
|
||||
# warm_up_candles
|
||||
config['env']['optimization']['warmup_candles_num'] = int(conf['warm_up_candles'])
|
||||
|
||||
# backtest and live
|
||||
if jh.is_backtesting() or jh.is_live():
|
||||
# warm_up_candles
|
||||
config['env']['data']['warmup_candles_num'] = int(conf['warm_up_candles'])
|
||||
# logs
|
||||
config['env']['logging'] = conf['logging']
|
||||
# exchanges
|
||||
for key, e in conf['exchanges'].items():
|
||||
config['env']['exchanges'][e['name']] = {
|
||||
'fee': float(e['fee']),
|
||||
'type': 'futures',
|
||||
# used only in futures trading
|
||||
# 'settlement_currency': 'USDT',
|
||||
'settlement_currency': jh.get_settlement_currency_from_exchange(e['name']),
|
||||
# accepted values are: 'cross' and 'isolated'
|
||||
'futures_leverage_mode': e['futures_leverage_mode'],
|
||||
# 1x, 2x, 10x, 50x, etc. Enter as integers
|
||||
'futures_leverage': int(e['futures_leverage']),
|
||||
'assets': [
|
||||
{'asset': 'USDT', 'balance': float(e['balance'])},
|
||||
],
|
||||
}
|
||||
|
||||
# live mode only
|
||||
if jh.is_live():
|
||||
config['env']['notifications'] = conf['notifications']
|
||||
|
||||
# TODO: must become a config value later when we go after multi account support?
|
||||
config['env']['identifier'] = 'main'
|
||||
|
||||
# # add sandbox because it isn't in the local config file but it is needed since we might have replaced it
|
||||
# config['env']['exchanges']['Sandbox'] = {
|
||||
# 'type': 'spot',
|
||||
# # used only in futures trading
|
||||
# 'settlement_currency': 'USDT',
|
||||
# 'fee': 0,
|
||||
# 'futures_leverage_mode': 'cross',
|
||||
# 'futures_leverage': 1,
|
||||
# 'assets': [
|
||||
# {'asset': 'USDT', 'balance': 10_000},
|
||||
# {'asset': 'BTC', 'balance': 0},
|
||||
# ],
|
||||
# }
|
||||
config['env'] = c
|
||||
# add sandbox because it isn't in the local config file
|
||||
config['env']['exchanges']['Sandbox'] = {
|
||||
'type': 'spot',
|
||||
# used only in futures trading
|
||||
'settlement_currency': 'USDT',
|
||||
'fee': 0,
|
||||
'futures_leverage_mode': 'cross',
|
||||
'futures_leverage': 1,
|
||||
'assets': [
|
||||
{'asset': 'USDT', 'balance': 10_000},
|
||||
{'asset': 'BTC', 'balance': 0},
|
||||
],
|
||||
}
|
||||
|
||||
|
||||
def reset_config() -> None:
|
||||
global config
|
||||
config = backup_config.copy()
|
||||
|
||||
|
||||
backup_config = config.copy()
|
||||
|
||||
@@ -12,9 +12,7 @@ class order_statuses:
|
||||
ACTIVE = 'ACTIVE'
|
||||
CANCELED = 'CANCELED'
|
||||
EXECUTED = 'EXECUTED'
|
||||
PARTIALLY_EXECUTED = 'PARTIALLY EXECUTED'
|
||||
QUEUED = 'QUEUED'
|
||||
LIQUIDATED = 'LIQUIDATED'
|
||||
|
||||
|
||||
class timeframes:
|
||||
@@ -32,6 +30,8 @@ class timeframes:
|
||||
HOUR_8 = '8h'
|
||||
HOUR_12 = '12h'
|
||||
DAY_1 = '1D'
|
||||
DAY_3 = '3D'
|
||||
WEEK_1 = '1W'
|
||||
|
||||
|
||||
class colors:
|
||||
|
||||
@@ -18,6 +18,10 @@ class InvalidStrategy(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class Breaker(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class CandleNotFoundInDatabase(Exception):
|
||||
pass
|
||||
|
||||
@@ -30,6 +34,10 @@ class SymbolNotFound(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class MaximumDecimal(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class RouteNotFound(Exception):
|
||||
pass
|
||||
|
||||
@@ -46,10 +54,6 @@ class ExchangeNotResponding(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class ExchangeRejectedOrder(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class InvalidShape(Exception):
|
||||
pass
|
||||
|
||||
@@ -68,7 +72,3 @@ class NegativeBalance(Exception):
|
||||
|
||||
class InsufficientMargin(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class Termination(Exception):
|
||||
pass
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Union
|
||||
from jesse.models import Order
|
||||
|
||||
|
||||
class Exchange(ABC):
|
||||
"""
|
||||
@@ -8,29 +7,29 @@ class Exchange(ABC):
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def market_order(self, symbol: str, qty: float, current_price: float, side: str, role: str, flags: list) -> Order:
|
||||
def market_order(self, symbol, qty, current_price, side, role, flags):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def limit_order(self, symbol: str, qty: float, price: float, side: str, role: str, flags: list) -> Order:
|
||||
def limit_order(self, symbol, qty, price, side, role, flags):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def stop_order(self, symbol: str, qty: float, price: float, side: str, role: str, flags: list) -> Order:
|
||||
def stop_order(self, symbol, qty, price, side, role, flags):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def cancel_all_orders(self, symbol: str) -> None:
|
||||
def cancel_all_orders(self, symbol):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def cancel_order(self, symbol: str, order_id: str) -> None:
|
||||
def cancel_order(self, symbol, order_id):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_exec_inst(self, flags: list) -> Union[str, None]:
|
||||
def get_exec_inst(self, flags):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def _fetch_precisions(self) -> None:
|
||||
def _get_precisions(self):
|
||||
pass
|
||||
|
||||
@@ -3,14 +3,14 @@ from jesse.enums import order_types
|
||||
from jesse.exchanges.exchange import Exchange
|
||||
from jesse.models import Order
|
||||
from jesse.store import store
|
||||
from typing import Union
|
||||
|
||||
|
||||
class Sandbox(Exchange):
|
||||
def __init__(self, name='Sandbox'):
|
||||
super().__init__()
|
||||
self.name = name
|
||||
|
||||
def market_order(self, symbol: str, qty: float, current_price: float, side: str, role: str, flags: list) -> Order:
|
||||
def market_order(self, symbol, qty, current_price, side, role, flags):
|
||||
order = Order({
|
||||
'id': jh.generate_unique_id(),
|
||||
'symbol': symbol,
|
||||
@@ -29,7 +29,7 @@ class Sandbox(Exchange):
|
||||
|
||||
return order
|
||||
|
||||
def limit_order(self, symbol: str, qty: float, price: float, side: str, role: str, flags: list) -> Order:
|
||||
def limit_order(self, symbol, qty, price, side, role, flags):
|
||||
order = Order({
|
||||
'id': jh.generate_unique_id(),
|
||||
'symbol': symbol,
|
||||
@@ -46,7 +46,7 @@ class Sandbox(Exchange):
|
||||
|
||||
return order
|
||||
|
||||
def stop_order(self, symbol: str, qty: float, price: float, side: str, role: str, flags: list) -> Order:
|
||||
def stop_order(self, symbol, qty, price, side, role, flags):
|
||||
order = Order({
|
||||
'id': jh.generate_unique_id(),
|
||||
'symbol': symbol,
|
||||
@@ -63,7 +63,7 @@ class Sandbox(Exchange):
|
||||
|
||||
return order
|
||||
|
||||
def cancel_all_orders(self, symbol: str) -> None:
|
||||
def cancel_all_orders(self, symbol):
|
||||
orders = filter(lambda o: o.is_new,
|
||||
store.orders.get_orders(self.name, symbol))
|
||||
|
||||
@@ -73,13 +73,13 @@ class Sandbox(Exchange):
|
||||
if not jh.is_unit_testing():
|
||||
store.orders.storage[f'{self.name}-{symbol}'].clear()
|
||||
|
||||
def cancel_order(self, symbol: str, order_id: str) -> None:
|
||||
def cancel_order(self, symbol, order_id):
|
||||
store.orders.get_order_by_id(self.name, symbol, order_id).cancel()
|
||||
|
||||
def get_exec_inst(self, flags: list) -> Union[str, None]:
|
||||
def get_exec_inst(self, flags):
|
||||
if flags:
|
||||
return flags[0]
|
||||
return None
|
||||
|
||||
def _fetch_precisions(self) -> None:
|
||||
def _get_precisions(self):
|
||||
pass
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
from random import randint
|
||||
from typing import Union
|
||||
|
||||
import numpy as np
|
||||
|
||||
@@ -13,7 +12,7 @@ min_price = min(open_price, close_price)
|
||||
low_price = min_price if randint(0, 1) else randint(min_price, min_price + 10)
|
||||
|
||||
|
||||
def fake_range_candle(count: int) -> np.ndarray:
|
||||
def fake_range_candle(count) -> np.ndarray:
|
||||
fake_candle(reset=True)
|
||||
arr = np.zeros((count, 6))
|
||||
for i in range(count):
|
||||
@@ -21,7 +20,7 @@ def fake_range_candle(count: int) -> np.ndarray:
|
||||
return arr
|
||||
|
||||
|
||||
def fake_range_candle_from_range_prices(prices: Union[list, range]) -> np.ndarray:
|
||||
def fake_range_candle_from_range_prices(prices) -> np.ndarray:
|
||||
fake_candle(reset=True)
|
||||
global first_timestamp
|
||||
arr = []
|
||||
@@ -46,7 +45,7 @@ def fake_range_candle_from_range_prices(prices: Union[list, range]) -> np.ndarra
|
||||
return np.array(arr)
|
||||
|
||||
|
||||
def fake_candle(attributes: dict = None, reset: bool = False) -> np.ndarray:
|
||||
def fake_candle(attributes=None, reset=False):
|
||||
global first_timestamp
|
||||
global open_price
|
||||
global close_price
|
||||
|
||||
@@ -7,7 +7,7 @@ from jesse.models import Order
|
||||
first_timestamp = 1552309186171
|
||||
|
||||
|
||||
def fake_order(attributes: dict = None) -> Order:
|
||||
def fake_order(attributes=None):
|
||||
"""
|
||||
|
||||
:param attributes:
|
||||
|
||||
240
jesse/helpers.py
240
jesse/helpers.py
@@ -6,7 +6,7 @@ import string
|
||||
import sys
|
||||
import uuid
|
||||
from typing import List, Tuple, Union, Any
|
||||
from pprint import pprint
|
||||
|
||||
import arrow
|
||||
import click
|
||||
import numpy as np
|
||||
@@ -77,7 +77,7 @@ def color(msg_text: str, msg_color: str) -> str:
|
||||
return click.style(msg_text, fg='magenta')
|
||||
if msg_color == 'cyan':
|
||||
return click.style(msg_text, fg='cyan')
|
||||
if msg_color in {'white', 'gray'}:
|
||||
if msg_color in ['white', 'gray']:
|
||||
return click.style(msg_text, fg='white')
|
||||
|
||||
raise ValueError('unsupported color')
|
||||
@@ -94,7 +94,9 @@ def convert_number(old_max: float, old_min: float, new_max: float, new_min: floa
|
||||
|
||||
old_range = (old_max - old_min)
|
||||
new_range = (new_max - new_min)
|
||||
return (((old_value - old_min) * new_range) / old_range) + new_min
|
||||
new_value = (((old_value - old_min) * new_range) / old_range) + new_min
|
||||
|
||||
return new_value
|
||||
|
||||
|
||||
def dashless_symbol(symbol: str) -> str:
|
||||
@@ -193,11 +195,6 @@ def file_exists(path: str) -> bool:
|
||||
return os.path.isfile(path)
|
||||
|
||||
|
||||
def make_directory(path: str) -> None:
|
||||
if not os.path.exists(path):
|
||||
os.makedirs(path)
|
||||
|
||||
|
||||
def floor_with_precision(num: float, precision: int = 0) -> float:
|
||||
temp = 10 ** precision
|
||||
return math.floor(num * temp) / temp
|
||||
@@ -258,7 +255,7 @@ def get_config(keys: str, default: Any = None) -> Any:
|
||||
if not str:
|
||||
raise ValueError('keys string cannot be empty')
|
||||
|
||||
if is_unit_testing() or keys not in CACHED_CONFIG:
|
||||
if is_unit_testing() or not keys in CACHED_CONFIG:
|
||||
if os.environ.get(keys.upper().replace(".", "_").replace(" ", "_")) is not None:
|
||||
CACHED_CONFIG[keys] = os.environ.get(keys.upper().replace(".", "_").replace(" ", "_"))
|
||||
else:
|
||||
@@ -273,17 +270,10 @@ def get_config(keys: str, default: Any = None) -> Any:
|
||||
def get_strategy_class(strategy_name: str):
|
||||
from pydoc import locate
|
||||
|
||||
if not is_unit_testing():
|
||||
return locate(f'strategies.{strategy_name}.{strategy_name}')
|
||||
path = sys.path[0]
|
||||
# live plugin
|
||||
if path.endswith('jesse-live'):
|
||||
strategy_dir = f'tests.strategies.{strategy_name}.{strategy_name}'
|
||||
# main framework
|
||||
if is_unit_testing():
|
||||
return locate(f'jesse.strategies.{strategy_name}.{strategy_name}')
|
||||
else:
|
||||
strategy_dir = f'jesse.strategies.{strategy_name}.{strategy_name}'
|
||||
|
||||
return locate(strategy_dir)
|
||||
return locate(f'strategies.{strategy_name}.{strategy_name}')
|
||||
|
||||
|
||||
def insecure_hash(msg: str) -> str:
|
||||
@@ -322,7 +312,7 @@ def is_debugging() -> bool:
|
||||
|
||||
def is_importing_candles() -> bool:
|
||||
from jesse.config import config
|
||||
return config['app']['trading_mode'] == 'candles'
|
||||
return config['app']['trading_mode'] == 'import-candles'
|
||||
|
||||
|
||||
def is_live() -> bool:
|
||||
@@ -355,7 +345,7 @@ def is_unit_testing() -> bool:
|
||||
return "pytest" in sys.modules or config['app']['is_unit_testing']
|
||||
|
||||
|
||||
def is_valid_uuid(uuid_to_test:str, version: int = 4) -> bool:
|
||||
def is_valid_uuid(uuid_to_test, version: int = 4) -> bool:
|
||||
try:
|
||||
uuid_obj = uuid.UUID(uuid_to_test, version=version)
|
||||
except ValueError:
|
||||
@@ -373,6 +363,10 @@ def key(exchange: str, symbol: str, timeframe: str = None):
|
||||
def max_timeframe(timeframes_list: list) -> str:
|
||||
from jesse.enums import timeframes
|
||||
|
||||
if timeframes.WEEK_1 in timeframes_list:
|
||||
return timeframes.WEEK_1
|
||||
if timeframes.DAY_3 in timeframes_list:
|
||||
return timeframes.DAY_3
|
||||
if timeframes.DAY_1 in timeframes_list:
|
||||
return timeframes.DAY_1
|
||||
if timeframes.HOUR_12 in timeframes_list:
|
||||
@@ -407,42 +401,32 @@ def normalize(x: float, x_min: float, x_max: float) -> float:
|
||||
"""
|
||||
Rescaling data to have values between 0 and 1
|
||||
"""
|
||||
return (x - x_min) / (x_max - x_min)
|
||||
x_new = (x - x_min) / (x_max - x_min)
|
||||
return x_new
|
||||
|
||||
|
||||
def now(force_fresh=False) -> int:
|
||||
def now() -> int:
|
||||
"""
|
||||
Always returns the current time in milliseconds but rounds time in matter of seconds
|
||||
"""
|
||||
return now_to_timestamp(force_fresh)
|
||||
return now_to_timestamp()
|
||||
|
||||
|
||||
def now_to_timestamp(force_fresh=False) -> int:
|
||||
if not force_fresh and (not (is_live() or is_collecting_data() or is_importing_candles())):
|
||||
def now_to_timestamp() -> int:
|
||||
if not (is_live() or is_collecting_data() or is_importing_candles()):
|
||||
from jesse.store import store
|
||||
return store.app.time
|
||||
|
||||
return arrow.utcnow().int_timestamp * 1000
|
||||
|
||||
|
||||
def current_1m_candle_timestamp():
|
||||
return arrow.utcnow().floor('minute').int_timestamp * 1000
|
||||
|
||||
|
||||
def np_ffill(arr: np.ndarray, axis: int = 0) -> np.ndarray:
|
||||
idx_shape = tuple([slice(None)] + [np.newaxis] * (len(arr.shape) - axis - 1))
|
||||
idx = np.where(~np.isnan(arr), np.arange(arr.shape[axis])[idx_shape], 0)
|
||||
np.maximum.accumulate(idx, axis=axis, out=idx)
|
||||
slc = [
|
||||
np.arange(k)[
|
||||
tuple(
|
||||
slice(None) if dim == i else np.newaxis
|
||||
for dim in range(len(arr.shape))
|
||||
)
|
||||
]
|
||||
for i, k in enumerate(arr.shape)
|
||||
]
|
||||
|
||||
slc = [np.arange(k)[tuple([slice(None) if dim == i else np.newaxis
|
||||
for dim in range(len(arr.shape))])]
|
||||
for i, k in enumerate(arr.shape)]
|
||||
slc[axis] = idx
|
||||
return arr[tuple(slc)]
|
||||
|
||||
@@ -488,10 +472,10 @@ def orderbook_insertion_index_search(arr, target: int, ascending: bool = True) -
|
||||
lower = 0
|
||||
upper = len(arr)
|
||||
|
||||
while lower < upper:
|
||||
x = lower + (upper - lower) // 2
|
||||
val = arr[x][0]
|
||||
if ascending:
|
||||
if ascending:
|
||||
while lower < upper:
|
||||
x = lower + (upper - lower) // 2
|
||||
val = arr[x][0]
|
||||
if target == val:
|
||||
return True, x
|
||||
elif target > val:
|
||||
@@ -502,16 +486,20 @@ def orderbook_insertion_index_search(arr, target: int, ascending: bool = True) -
|
||||
if lower == x:
|
||||
return False, lower
|
||||
upper = x
|
||||
elif target == val:
|
||||
return True, x
|
||||
elif target < val:
|
||||
if lower == x:
|
||||
return False, lower + 1
|
||||
lower = x
|
||||
elif target > val:
|
||||
if lower == x:
|
||||
return False, lower
|
||||
upper = x
|
||||
else:
|
||||
while lower < upper:
|
||||
x = lower + (upper - lower) // 2
|
||||
val = arr[x][0]
|
||||
if target == val:
|
||||
return True, x
|
||||
elif target < val:
|
||||
if lower == x:
|
||||
return False, lower + 1
|
||||
lower = x
|
||||
elif target > val:
|
||||
if lower == x:
|
||||
return False, lower
|
||||
upper = x
|
||||
|
||||
|
||||
def orderbook_trim_price(p: float, ascending: bool, unit: float) -> float:
|
||||
@@ -549,7 +537,7 @@ def quote_asset(symbol: str) -> str:
|
||||
|
||||
|
||||
def random_str(num_characters: int = 8) -> str:
|
||||
return ''.join(random.choice(string.ascii_letters) for _ in range(num_characters))
|
||||
return ''.join(random.choice(string.ascii_letters) for i in range(num_characters))
|
||||
|
||||
|
||||
def readable_duration(seconds: int, granularity: int = 2) -> str:
|
||||
@@ -644,9 +632,9 @@ def side_to_type(s: str) -> str:
|
||||
raise ValueError
|
||||
|
||||
|
||||
def string_after_character(s: str, character: str) -> str:
|
||||
def string_after_character(string: str, character: str) -> str:
|
||||
try:
|
||||
return s.split(character, 1)[1]
|
||||
return string.split(character, 1)[1]
|
||||
except IndexError:
|
||||
return None
|
||||
|
||||
@@ -673,8 +661,8 @@ def style(msg_text: str, msg_style: str) -> str:
|
||||
|
||||
def terminate_app() -> None:
|
||||
# close the database
|
||||
from jesse.services.db import database
|
||||
database.close_connection()
|
||||
from jesse.services.db import close_connection
|
||||
close_connection()
|
||||
# disconnect python from the OS
|
||||
os._exit(1)
|
||||
|
||||
@@ -685,15 +673,13 @@ def error(msg: str, force_print: bool = False) -> None:
|
||||
from jesse.services import logger
|
||||
logger.error(msg)
|
||||
if force_print:
|
||||
_print_error(msg)
|
||||
print('\n')
|
||||
print(color('========== critical error =========='.upper(), 'red'))
|
||||
print(color(msg, 'red'))
|
||||
else:
|
||||
_print_error(msg)
|
||||
|
||||
|
||||
def _print_error(msg: str) -> None:
|
||||
print('\n')
|
||||
print(color('========== critical error =========='.upper(), 'red'))
|
||||
print(color(msg, 'red'))
|
||||
print('\n')
|
||||
print(color('========== critical error =========='.upper(), 'red'))
|
||||
print(color(msg, 'red'))
|
||||
|
||||
|
||||
def timeframe_to_one_minutes(timeframe: str) -> int:
|
||||
@@ -715,6 +701,8 @@ def timeframe_to_one_minutes(timeframe: str) -> int:
|
||||
timeframes.HOUR_8: 60 * 8,
|
||||
timeframes.HOUR_12: 60 * 12,
|
||||
timeframes.DAY_1: 60 * 24,
|
||||
timeframes.DAY_3: 60 * 24 * 3,
|
||||
timeframes.WEEK_1: 60 * 24 * 7,
|
||||
}
|
||||
|
||||
try:
|
||||
@@ -774,121 +762,3 @@ def closing_side(position_type: str) -> str:
|
||||
return 'buy'
|
||||
else:
|
||||
raise ValueError(f'Value entered for position_type ({position_type}) is not valid')
|
||||
|
||||
|
||||
def merge_dicts(d1: dict, d2: dict) -> dict:
|
||||
"""
|
||||
Merges nested dictionaries
|
||||
|
||||
:param d1: dict
|
||||
:param d2: dict
|
||||
:return: dict
|
||||
"""
|
||||
def inner(dict1, dict2):
|
||||
for k in set(dict1.keys()).union(dict2.keys()):
|
||||
if k in dict1 and k in dict2:
|
||||
if isinstance(dict1[k], dict) and isinstance(dict2[k], dict):
|
||||
yield k, dict(merge_dicts(dict1[k], dict2[k]))
|
||||
else:
|
||||
yield k, dict2[k]
|
||||
elif k in dict1:
|
||||
yield k, dict1[k]
|
||||
else:
|
||||
yield k, dict2[k]
|
||||
|
||||
return dict(inner(d1, d2))
|
||||
|
||||
|
||||
def computer_name():
|
||||
import platform
|
||||
return platform.node()
|
||||
|
||||
|
||||
def validate_response(response):
|
||||
if response.status_code != 200:
|
||||
err_msg = f"[{response.status_code}]: {response.json()['message']}\nPlease contact us at support@jesse.trade if this is unexpected."
|
||||
|
||||
if response.status_code not in [401, 403]:
|
||||
raise ConnectionError(err_msg)
|
||||
error(err_msg, force_print=True)
|
||||
terminate_app()
|
||||
|
||||
|
||||
def get_session_id():
|
||||
from jesse.store import store
|
||||
return store.app.session_id
|
||||
|
||||
|
||||
def get_pid():
|
||||
return os.getpid()
|
||||
|
||||
|
||||
def is_jesse_project():
|
||||
ls = os.listdir('.')
|
||||
return 'strategies' in ls and 'storage' in ls
|
||||
|
||||
|
||||
def dd(item, pretty=True):
|
||||
"""
|
||||
Dump and Die but pretty: used for debugging when developing Jesse
|
||||
"""
|
||||
dump(item, pretty)
|
||||
terminate_app()
|
||||
|
||||
|
||||
def dump(item, pretty=True):
|
||||
"""
|
||||
Dump object in pretty format: used for debugging when developing Jesse
|
||||
"""
|
||||
print(
|
||||
color('\n========= Debugging Value =========='.upper(), 'yellow')
|
||||
)
|
||||
|
||||
if pretty:
|
||||
pprint(item)
|
||||
else:
|
||||
print(item)
|
||||
|
||||
print(
|
||||
color('====================================\n', 'yellow')
|
||||
)
|
||||
|
||||
|
||||
def float_or_none(item):
|
||||
"""
|
||||
Return the float of the value if it's not None
|
||||
"""
|
||||
if item is None:
|
||||
return None
|
||||
else:
|
||||
return float(item)
|
||||
|
||||
|
||||
def str_or_none(item, encoding='utf-8'):
|
||||
"""
|
||||
Return the str of the value if it's not None
|
||||
"""
|
||||
if item is None:
|
||||
return None
|
||||
else:
|
||||
# return item if it's str, if not, decode it using encoding
|
||||
if isinstance(item, str):
|
||||
return item
|
||||
return str(item, encoding)
|
||||
|
||||
|
||||
def get_settlement_currency_from_exchange(exchange: str):
|
||||
if exchange in {'FTX Futures', 'Bitfinex', 'Coinbase'}:
|
||||
return 'USD'
|
||||
else:
|
||||
return 'USDT'
|
||||
|
||||
|
||||
def cpu_cores_count():
|
||||
from multiprocessing import cpu_count
|
||||
return cpu_count()
|
||||
|
||||
|
||||
# a function that converts name to env_name. Example: 'Testnet Binance Futures' into 'TESTNET_BINANCE_FUTURES'
|
||||
def convert_to_env_name(name: str) -> str:
|
||||
return name.replace(' ', '_').upper()
|
||||
|
||||
@@ -62,7 +62,6 @@ from .ht_phasor import ht_phasor
|
||||
from .ht_sine import ht_sine
|
||||
from .ht_trendline import ht_trendline
|
||||
from .ht_trendmode import ht_trendmode
|
||||
from .hurst_exponent import hurst_exponent
|
||||
from .hwma import hwma
|
||||
from .ichimoku_cloud import ichimoku_cloud
|
||||
from .ichimoku_cloud_seq import ichimoku_cloud_seq
|
||||
@@ -114,7 +113,6 @@ from .pvi import pvi
|
||||
from .pwma import pwma
|
||||
from .qstick import qstick
|
||||
from .reflex import reflex
|
||||
from .rma import rma
|
||||
from .roc import roc
|
||||
from .rocp import rocp
|
||||
from .rocr import rocr
|
||||
@@ -171,6 +169,5 @@ from .wclprice import wclprice
|
||||
from .wilders import wilders
|
||||
from .willr import willr
|
||||
from .wma import wma
|
||||
from .wt import wt
|
||||
from .zlema import zlema
|
||||
from .zscore import zscore
|
||||
|
||||
@@ -39,11 +39,13 @@ def numpy_ewma(data: np.ndarray, window: int):
|
||||
:return:
|
||||
"""
|
||||
alpha = 1 / window
|
||||
# scale = 1 / (1 - alpha)
|
||||
scale = 1 / (1 - alpha)
|
||||
n = data.shape[0]
|
||||
scale_arr = (1 - alpha) ** (-1 * np.arange(n))
|
||||
weights = (1 - alpha) ** np.arange(n)
|
||||
pw0 = (1 - alpha) ** (n - 1)
|
||||
mult = data * pw0 * scale_arr
|
||||
cumsums = mult.cumsum()
|
||||
return cumsums * scale_arr[::-1] / weights.cumsum()
|
||||
out = cumsums * scale_arr[::-1] / weights.cumsum()
|
||||
|
||||
return out
|
||||
|
||||
@@ -33,7 +33,7 @@ def alma(candles: np.ndarray, period: int = 9, sigma: float = 6.0, distribution_
|
||||
wtds = np.exp(-(np.arange(period) - m) ** 2 / dss)
|
||||
pnp_array3D = strided_axis0(source, len(wtds))
|
||||
res = np.zeros(source.shape)
|
||||
res[period - 1:] = np.tensordot(pnp_array3D, wtds, axes=(1, 0))[:]
|
||||
res[period - 1:] = np.tensordot(pnp_array3D, wtds, axes=((1), (0)))[:]
|
||||
res /= wtds.sum()
|
||||
res[res == 0] = np.nan
|
||||
|
||||
|
||||
@@ -15,7 +15,6 @@ def cfo(candles: np.ndarray, period: int = 14, scalar: float = 100, source_type:
|
||||
|
||||
:param candles: np.ndarray
|
||||
:param period: int - default: 14
|
||||
:param scalar: float - default: 100
|
||||
:param source_type: str - default: "close"
|
||||
:param sequential: bool - default: False
|
||||
|
||||
@@ -25,10 +24,10 @@ def cfo(candles: np.ndarray, period: int = 14, scalar: float = 100, source_type:
|
||||
|
||||
source = get_candle_source(candles, source_type=source_type)
|
||||
|
||||
res = scalar * (source - talib.LINEARREG(source, timeperiod=period))
|
||||
res /= source
|
||||
cfo = scalar * (source - talib.LINEARREG(source, timeperiod=period))
|
||||
cfo /= source
|
||||
|
||||
if sequential:
|
||||
return res
|
||||
return cfo
|
||||
else:
|
||||
return None if np.isnan(res[-1]) else res[-1]
|
||||
return None if np.isnan(cfo[-1]) else cfo[-1]
|
||||
|
||||
@@ -32,15 +32,15 @@ def cg(candles: np.ndarray, period: int = 10, source_type: str = "close", sequen
|
||||
@njit
|
||||
def go_fast(source, period): # Function is compiled to machine code when called the first time
|
||||
res = np.full_like(source, fill_value=np.nan)
|
||||
for i in range(source.size):
|
||||
for i in range(0, source.size):
|
||||
if i > period:
|
||||
num = 0
|
||||
denom = 0
|
||||
for count in range(period - 1):
|
||||
for count in range(0, period - 1):
|
||||
close = source[i - count]
|
||||
if not np.isnan(close):
|
||||
num += (1 + count) * close
|
||||
denom += close
|
||||
num = num + (1 + count) * close
|
||||
denom = denom + close
|
||||
result = -num / denom if denom != 0 else 0
|
||||
res[i] = result
|
||||
return res
|
||||
|
||||
@@ -14,7 +14,7 @@ def chande(candles: np.ndarray, period: int = 22, mult: float = 3.0, direction:
|
||||
|
||||
:param candles: np.ndarray
|
||||
:param period: int - default: 22
|
||||
:param mult: float - default: 3.0
|
||||
:param period: float - default: 3.0
|
||||
:param direction: str - default: "long"
|
||||
:param sequential: bool - default: False
|
||||
|
||||
@@ -40,10 +40,10 @@ def chande(candles: np.ndarray, period: int = 22, mult: float = 3.0, direction:
|
||||
return result if sequential else result[-1]
|
||||
|
||||
|
||||
def filter1d_same(a: np.ndarray, W: int, max_or_min: str, fillna=np.nan):
|
||||
def filter1d_same(a: np.ndarray, W: int, type: str, fillna=np.nan):
|
||||
out_dtype = np.full(0, fillna).dtype
|
||||
hW = (W - 1) // 2 # Half window size
|
||||
if max_or_min == 'max':
|
||||
if type == 'max':
|
||||
out = maximum_filter1d(a, size=W, origin=hW)
|
||||
else:
|
||||
out = minimum_filter1d(a, size=W, origin=hW)
|
||||
|
||||
@@ -3,7 +3,7 @@ from typing import Union
|
||||
import numpy as np
|
||||
import talib
|
||||
|
||||
from jesse.helpers import slice_candles
|
||||
from jesse.helpers import get_candle_source, slice_candles
|
||||
|
||||
|
||||
def chop(candles: np.ndarray, period: int = 14, scalar: float = 100, drift: int = 1, sequential: bool = False) -> Union[
|
||||
@@ -13,8 +13,6 @@ def chop(candles: np.ndarray, period: int = 14, scalar: float = 100, drift: int
|
||||
|
||||
:param candles: np.ndarray
|
||||
:param period: int - default: 30
|
||||
:param scalar: float - default: 100
|
||||
:param drift: int - default: 1
|
||||
:param sequential: bool - default: False
|
||||
|
||||
:return: float | np.ndarray
|
||||
|
||||
@@ -28,7 +28,7 @@ def correlation_cycle(candles: np.ndarray, period: int = 20, threshold: int = 9,
|
||||
|
||||
source = get_candle_source(candles, source_type=source_type)
|
||||
|
||||
realPart, imagPart, angle = go_fast(source, period)
|
||||
realPart, imagPart, angle = go_fast(source, period, threshold)
|
||||
|
||||
priorAngle = np_shift(angle, 1, fill_value=np.nan)
|
||||
angle = np.where(np.logical_and(priorAngle > angle, priorAngle - angle < 270.0), priorAngle, angle)
|
||||
@@ -43,7 +43,7 @@ def correlation_cycle(candles: np.ndarray, period: int = 20, threshold: int = 9,
|
||||
|
||||
|
||||
@njit
|
||||
def go_fast(source, period): # Function is compiled to machine code when called the first time
|
||||
def go_fast(source, period, threshold): # Function is compiled to machine code when called the first time
|
||||
# Correlation Cycle Function
|
||||
PIx2 = 4.0 * np.arcsin(1.0)
|
||||
period = max(2, period)
|
||||
@@ -65,28 +65,31 @@ def go_fast(source, period): # Function is compiled to machine code when called
|
||||
|
||||
for j in range(period):
|
||||
jMinusOne = j + 1
|
||||
X = 0 if np.isnan(source[i - jMinusOne]) else source[i - jMinusOne]
|
||||
if np.isnan(source[i - jMinusOne]):
|
||||
X = 0
|
||||
else:
|
||||
X = source[i - jMinusOne]
|
||||
temp = PIx2 * jMinusOne / period
|
||||
Yc = np.cos(temp)
|
||||
Ys = -np.sin(temp)
|
||||
Rx += X
|
||||
Ix += X
|
||||
Rxx += X * X
|
||||
Ixx += X * X
|
||||
Rxy += X * Yc
|
||||
Ixy += X * Ys
|
||||
Ryy += Yc * Yc
|
||||
Iyy += Ys * Ys
|
||||
Ry += Yc
|
||||
Iy += Ys
|
||||
Rx = Rx + X
|
||||
Ix = Ix + X
|
||||
Rxx = Rxx + X * X
|
||||
Ixx = Ixx + X * X
|
||||
Rxy = Rxy + X * Yc
|
||||
Ixy = Ixy + X * Ys
|
||||
Ryy = Ryy + Yc * Yc
|
||||
Iyy = Iyy + Ys * Ys
|
||||
Ry = Ry + Yc
|
||||
Iy = Iy + Ys
|
||||
|
||||
temp_1 = period * Rxx - Rx**2
|
||||
temp_2 = period * Ryy - Ry**2
|
||||
temp_1 = period * Rxx - Rx * Rx
|
||||
temp_2 = period * Ryy - Ry * Ry
|
||||
if temp_1 > 0.0 and temp_2 > 0.0:
|
||||
realPart[i] = (period * Rxy - Rx * Ry) / np.sqrt(temp_1 * temp_2)
|
||||
|
||||
temp_1 = period * Ixx - Ix**2
|
||||
temp_2 = period * Iyy - Iy**2
|
||||
temp_1 = period * Ixx - Ix * Ix
|
||||
temp_2 = period * Iyy - Iy * Iy
|
||||
if temp_1 > 0.0 and temp_2 > 0.0:
|
||||
imagPart[i] = (period * Ixy - Ix * Iy) / np.sqrt(temp_1 * temp_2)
|
||||
|
||||
|
||||
@@ -39,11 +39,11 @@ def cwma(candles: np.ndarray, period: int = 14, source_type: str = "close", sequ
|
||||
def vpwma_fast(source, period):
|
||||
newseries = np.copy(source)
|
||||
for j in range(period + 1, source.shape[0]):
|
||||
my_sum = 0.0
|
||||
sum = 0.0
|
||||
weightSum = 0.0
|
||||
for i in range(period - 1):
|
||||
weight = np.power(period - i, 3)
|
||||
my_sum += (source[j - i] * weight)
|
||||
sum += (source[j - i] * weight)
|
||||
weightSum += weight
|
||||
newseries[j] = my_sum / weightSum
|
||||
newseries[j] = sum / weightSum
|
||||
return newseries
|
||||
|
||||
@@ -54,7 +54,7 @@ def damiani_volatmeter_fast(source, sed_std, atrvis, atrsed, vis_std,
|
||||
vol = np.full_like(source, 0)
|
||||
t = np.full_like(source, 0)
|
||||
for i in range(source.shape[0]):
|
||||
if i >= sed_std:
|
||||
if not (i < sed_std):
|
||||
vol[i] = atrvis[i] / atrsed[i] + lag_s * (vol[i - 1] - vol[i - 3])
|
||||
anti_thres = np.std(source[i - vis_std:i]) / np.std(source[i - sed_std:i])
|
||||
t[i] = threshold - anti_thres
|
||||
|
||||
@@ -14,7 +14,6 @@ def dec_osc(candles: np.ndarray, hp_period: int = 125, k: float = 1, source_type
|
||||
:param candles: np.ndarray
|
||||
:param hp_period: int - default: 125
|
||||
:param k: float - default: 1
|
||||
:param source_type: str - default: "close"
|
||||
:param sequential: bool - default: False
|
||||
|
||||
:return: float | np.ndarray
|
||||
|
||||
@@ -13,7 +13,6 @@ def decycler(candles: np.ndarray, hp_period: int = 125, source_type: str = "clos
|
||||
|
||||
:param candles: np.ndarray
|
||||
:param hp_period: int - default: 125
|
||||
:param source_type: str - default: "close"
|
||||
:param sequential: bool - default: False
|
||||
|
||||
:return: float | np.ndarray
|
||||
|
||||
@@ -17,7 +17,6 @@ def devstop(candles: np.ndarray, period: int = 20, mult: float = 0, devtype: int
|
||||
:param candles: np.ndarray
|
||||
:param period: int - default: 20
|
||||
:param mult: float - default: 0
|
||||
:param devtype: int - default: 0
|
||||
:param direction: str - default: long
|
||||
:param sequential: bool - default: False
|
||||
|
||||
|
||||
@@ -45,9 +45,9 @@ def edcf_fast(source, period):
|
||||
for i in range(period):
|
||||
distance = 0.0
|
||||
for lb in range(1, period):
|
||||
distance += np.power(source[j - i] - source[j - i - lb], 2)
|
||||
num += distance * source[j - i]
|
||||
coefSum += distance
|
||||
distance = distance + np.power(source[j - i] - source[j - i - lb], 2)
|
||||
num = num + (distance * source[j - i])
|
||||
coefSum = coefSum + distance
|
||||
newseries[j] = num / coefSum if coefSum != 0 else 0
|
||||
|
||||
return newseries
|
||||
|
||||
@@ -17,7 +17,6 @@ def epma(candles: np.ndarray, period: int = 11, offset: int = 4, source_type: st
|
||||
|
||||
:param candles: np.ndarray
|
||||
:param period: int - default: 14
|
||||
:param offset: int - default: 4
|
||||
:param source_type: str - default: "close"
|
||||
:param sequential: bool - default: False
|
||||
|
||||
@@ -40,11 +39,11 @@ def epma(candles: np.ndarray, period: int = 11, offset: int = 4, source_type: st
|
||||
def epma_fast(source, period, offset):
|
||||
newseries = np.copy(source)
|
||||
for j in range(period + offset + 1 , source.shape[0]):
|
||||
my_sum = 0.0
|
||||
sum = 0.0
|
||||
weightSum = 0.0
|
||||
for i in range(period - 1):
|
||||
weight = period - i - offset
|
||||
my_sum += (source[j - i] * weight)
|
||||
sum += (source[j - i] * weight)
|
||||
weightSum += weight
|
||||
newseries[j] = 1 / weightSum * my_sum
|
||||
newseries[j] = 1 / weightSum * sum
|
||||
return newseries
|
||||
|
||||
@@ -20,10 +20,10 @@ def fisher(candles: np.ndarray, period: int = 9, sequential: bool = False) -> Fi
|
||||
"""
|
||||
candles = slice_candles(candles, sequential)
|
||||
|
||||
fisher_val, fisher_signal = ti.fisher(np.ascontiguousarray(candles[:, 3]), np.ascontiguousarray(candles[:, 4]),
|
||||
period=period)
|
||||
fisher, fisher_signal = ti.fisher(np.ascontiguousarray(candles[:, 3]), np.ascontiguousarray(candles[:, 4]),
|
||||
period=period)
|
||||
|
||||
if sequential:
|
||||
return FisherTransform(same_length(candles, fisher_val), same_length(candles, fisher_signal))
|
||||
return FisherTransform(same_length(candles, fisher), same_length(candles, fisher_signal))
|
||||
else:
|
||||
return FisherTransform(fisher_val[-1], fisher_signal[-1])
|
||||
return FisherTransform(fisher[-1], fisher_signal[-1])
|
||||
|
||||
@@ -31,12 +31,12 @@ def frama(candles: np.ndarray, window: int = 10, FC: int = 1, SC: int = 300, seq
|
||||
print("FRAMA n must be even. Adding one")
|
||||
n += 1
|
||||
|
||||
res = frame_fast(candles, n, SC, FC)
|
||||
frama = frame_fast(candles, n, SC, FC)
|
||||
|
||||
if sequential:
|
||||
return res
|
||||
return frama
|
||||
else:
|
||||
return res[-1]
|
||||
return frama[-1]
|
||||
|
||||
|
||||
@njit
|
||||
@@ -79,10 +79,10 @@ def frame_fast(candles, n, SC, FC):
|
||||
else:
|
||||
alphas[i] = alpha_
|
||||
|
||||
frama_val = np.zeros(candles.shape[0])
|
||||
frama_val[n - 1] = np.mean(candles[:, 2][:n])
|
||||
frama_val[:n - 1] = np.NaN
|
||||
frama = np.zeros(candles.shape[0])
|
||||
frama[n - 1] = np.mean(candles[:, 2][:n])
|
||||
frama[:n - 1] = np.NaN
|
||||
|
||||
for i in range(n, frama_val.shape[0]):
|
||||
frama_val[i] = (alphas[i] * candles[:, 2][i]) + (1 - alphas[i]) * frama_val[i - 1]
|
||||
return frama_val
|
||||
for i in range(n, frama.shape[0]):
|
||||
frama[i] = (alphas[i] * candles[:, 2][i]) + (1 - alphas[i]) * frama[i - 1]
|
||||
return frama
|
||||
|
||||
@@ -43,7 +43,7 @@ def fibonacci(n: int = 2) -> np.ndarray:
|
||||
|
||||
result = np.array([a])
|
||||
|
||||
for _ in range(n):
|
||||
for i in range(0, n):
|
||||
a, b = b, a + b
|
||||
result = np.append(result, a)
|
||||
|
||||
|
||||
@@ -48,11 +48,13 @@ def numpy_ewma(data, window):
|
||||
:return:
|
||||
"""
|
||||
alpha = 1 / window
|
||||
# scale = 1 / (1 - alpha)
|
||||
scale = 1 / (1 - alpha)
|
||||
n = data.shape[0]
|
||||
scale_arr = (1 - alpha) ** (-1 * np.arange(n))
|
||||
weights = (1 - alpha) ** np.arange(n)
|
||||
pw0 = (1 - alpha) ** (n - 1)
|
||||
mult = data * pw0 * scale_arr
|
||||
cumsums = mult.cumsum()
|
||||
return cumsums * scale_arr[::-1] / weights.cumsum()
|
||||
out = cumsums * scale_arr[::-1] / weights.cumsum()
|
||||
|
||||
return out
|
||||
|
||||
@@ -1,209 +0,0 @@
|
||||
import numpy as np
|
||||
|
||||
try:
|
||||
from numba import njit
|
||||
except ImportError:
|
||||
njit = lambda a : a
|
||||
|
||||
from jesse.helpers import get_candle_source, slice_candles
|
||||
from scipy import signal
|
||||
|
||||
def hurst_exponent(candles: np.ndarray, min_chunksize: int = 8, max_chunksize: int = 200, num_chunksize:int=5, method:int=1, source_type: str = "close") -> float:
|
||||
"""
|
||||
Hurst Exponent
|
||||
|
||||
:param candles: np.ndarray
|
||||
:param min_chunksize: int - default: 8
|
||||
:param max_chunksize: int - default: 200
|
||||
:param num_chunksize: int - default: 5
|
||||
:param method: int - default: 1 - 0: RS | 1: DMA | 2: DSOD
|
||||
:param source_type: str - default: "close"
|
||||
|
||||
:return: float
|
||||
"""
|
||||
|
||||
if len(candles.shape) == 1:
|
||||
source = candles
|
||||
else:
|
||||
candles = slice_candles(candles, False)
|
||||
source = get_candle_source(candles, source_type=source_type)
|
||||
|
||||
if method == 0:
|
||||
h = hurst_rs(np.diff(source), min_chunksize, max_chunksize, num_chunksize)
|
||||
elif method == 1:
|
||||
h = hurst_dma(source, min_chunksize, max_chunksize, num_chunksize)
|
||||
elif method == 2:
|
||||
h = hurst_dsod(source)
|
||||
else:
|
||||
raise NotImplementedError('The method choose is not implemented.')
|
||||
|
||||
return None if np.isnan(h) else h
|
||||
|
||||
|
||||
@njit
|
||||
def hurst_rs(x, min_chunksize, max_chunksize, num_chunksize):
|
||||
"""Estimate the Hurst exponent using R/S method.
|
||||
Estimates the Hurst (H) exponent using the R/S method from the time series.
|
||||
The R/S method consists of dividing the series into pieces of equal size
|
||||
`series_len` and calculating the rescaled range. This repeats the process
|
||||
for several `series_len` values and adjusts data regression to obtain the H.
|
||||
`series_len` will take values between `min_chunksize` and `max_chunksize`,
|
||||
the step size from `min_chunksize` to `max_chunksize` can be controlled
|
||||
through the parameter `step_chunksize`.
|
||||
Parameters
|
||||
----------
|
||||
x : 1D-array
|
||||
A time series to calculate hurst exponent, must have more elements
|
||||
than `min_chunksize` and `max_chunksize`.
|
||||
min_chunksize : int
|
||||
This parameter allow you control the minimum window size.
|
||||
max_chunksize : int
|
||||
This parameter allow you control the maximum window size.
|
||||
num_chunksize : int
|
||||
This parameter allow you control the size of the step from minimum to
|
||||
maximum window size. Bigger step means fewer calculations.
|
||||
out : 1-element-array, optional
|
||||
one element array to store the output.
|
||||
Returns
|
||||
-------
|
||||
H : float
|
||||
A estimation of Hurst exponent.
|
||||
References
|
||||
----------
|
||||
Hurst, H. E. (1951). Long term storage capacity of reservoirs. ASCE
|
||||
Transactions, 116(776), 770-808.
|
||||
Alessio, E., Carbone, A., Castelli, G. et al. Eur. Phys. J. B (2002) 27:
|
||||
197. http://dx.doi.org/10.1140/epjb/e20020150
|
||||
"""
|
||||
N = len(x)
|
||||
max_chunksize += 1
|
||||
rs_tmp = np.empty(N, dtype=np.float64)
|
||||
chunk_size_list = np.linspace(min_chunksize, max_chunksize, num_chunksize) \
|
||||
.astype(np.int64)
|
||||
rs_values_list = np.empty(num_chunksize, dtype=np.float64)
|
||||
|
||||
# 1. The series is divided into chunks of chunk_size_list size
|
||||
for i in range(num_chunksize):
|
||||
chunk_size = chunk_size_list[i]
|
||||
|
||||
# 2. it iterates on the indices of the first observation of each chunk
|
||||
number_of_chunks = int(len(x) / chunk_size)
|
||||
|
||||
for idx in range(number_of_chunks):
|
||||
# next means no overlapping
|
||||
# convert index to index selection of each chunk
|
||||
ini = idx * chunk_size
|
||||
end = ini + chunk_size
|
||||
chunk = x[ini:end]
|
||||
|
||||
# 2.1 Calculate the RS (chunk_size)
|
||||
z = np.cumsum(chunk - np.mean(chunk))
|
||||
rs_tmp[idx] = np.divide(
|
||||
np.max(z) - np.min(z), # range
|
||||
np.nanstd(chunk) # standar deviation
|
||||
)
|
||||
|
||||
# 3. Average of RS(chunk_size)
|
||||
rs_values_list[i] = np.nanmean(rs_tmp[:idx + 1])
|
||||
|
||||
# 4. calculate the Hurst exponent.
|
||||
H, c = np.linalg.lstsq(
|
||||
a=np.vstack((np.log(chunk_size_list), np.ones(num_chunksize))).T,
|
||||
b=np.log(rs_values_list)
|
||||
)[0]
|
||||
|
||||
return H
|
||||
|
||||
def hurst_dma(prices, min_chunksize=8, max_chunksize=200, num_chunksize=5):
|
||||
"""Estimate the Hurst exponent using R/S method.
|
||||
|
||||
Estimates the Hurst (H) exponent using the DMA method from the time series.
|
||||
The DMA method consists on calculate the moving average of size `series_len`
|
||||
and subtract it to the original series and calculating the standard
|
||||
deviation of that result. This repeats the process for several `series_len`
|
||||
values and adjusts data regression to obtain the H. `series_len` will take
|
||||
values between `min_chunksize` and `max_chunksize`, the step size from
|
||||
`min_chunksize` to `max_chunksize` can be controlled through the parameter
|
||||
`step_chunksize`.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
prices
|
||||
min_chunksize
|
||||
max_chunksize
|
||||
num_chunksize
|
||||
|
||||
Returns
|
||||
-------
|
||||
hurst_exponent : float
|
||||
Estimation of hurst exponent.
|
||||
|
||||
References
|
||||
----------
|
||||
Alessio, E., Carbone, A., Castelli, G. et al. Eur. Phys. J. B (2002) 27:
|
||||
197. https://dx.doi.org/10.1140/epjb/e20020150
|
||||
|
||||
"""
|
||||
max_chunksize += 1
|
||||
N = len(prices)
|
||||
n_list = np.arange(min_chunksize, max_chunksize, num_chunksize, dtype=np.int64)
|
||||
dma_list = np.empty(len(n_list))
|
||||
factor = 1 / (N - max_chunksize)
|
||||
# sweeping n_list
|
||||
for i, n in enumerate(n_list):
|
||||
b = np.divide([n - 1] + (n - 1) * [-1], n) # do the same as: y - y_ma_n
|
||||
noise = np.power(signal.lfilter(b, 1, prices)[max_chunksize:], 2)
|
||||
dma_list[i] = np.sqrt(factor * np.sum(noise))
|
||||
|
||||
H, const = np.linalg.lstsq(
|
||||
a=np.vstack([np.log10(n_list), np.ones(len(n_list))]).T,
|
||||
b=np.log10(dma_list), rcond=None
|
||||
)[0]
|
||||
return H
|
||||
|
||||
|
||||
def hurst_dsod(x):
|
||||
"""Estimate Hurst exponent on data timeseries.
|
||||
|
||||
The estimation is based on the discrete second order derivative. Consists on
|
||||
get two different noise of the original series and calculate the standard
|
||||
deviation and calculate the slope of two point with that values.
|
||||
source: https://gist.github.com/wmvanvliet/d883c3fe1402c7ced6fc
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : numpy array
|
||||
time series to estimate the Hurst exponent for.
|
||||
|
||||
Returns
|
||||
-------
|
||||
h : float
|
||||
The estimation of the Hurst exponent for the given time series.
|
||||
|
||||
References
|
||||
----------
|
||||
Istas, J.; G. Lang (1994), “Quadratic variations and estimation of the local
|
||||
Hölder index of data Gaussian process,” Ann. Inst. Poincaré, 33, pp. 407–436.
|
||||
|
||||
|
||||
Notes
|
||||
-----
|
||||
This hurst_ets is data literal traduction of wfbmesti.m of waveleet toolbox
|
||||
from matlab.
|
||||
"""
|
||||
y = np.cumsum(np.diff(x, axis=0), axis=0)
|
||||
|
||||
# second order derivative
|
||||
b1 = [1, -2, 1]
|
||||
y1 = signal.lfilter(b1, 1, y, axis=0)
|
||||
y1 = y1[len(b1) - 1:] # first values contain filter artifacts
|
||||
|
||||
# wider second order derivative
|
||||
b2 = [1, 0, -2, 0, 1]
|
||||
y2 = signal.lfilter(b2, 1, y, axis=0)
|
||||
y2 = y2[len(b2) - 1:] # first values contain filter artifacts
|
||||
|
||||
s1 = np.mean(y1 ** 2, axis=0)
|
||||
s2 = np.mean(y2 ** 2, axis=0)
|
||||
|
||||
return 0.5 * np.log2(s2 / s1)
|
||||
@@ -6,7 +6,7 @@ try:
|
||||
except ImportError:
|
||||
njit = lambda a : a
|
||||
|
||||
from jesse.helpers import get_candle_source, slice_candles, same_length
|
||||
from jesse.helpers import get_candle_source, slice_candles
|
||||
|
||||
|
||||
def hwma(candles: np.ndarray, na: float = 0.2, nb: float = 0.1, nc: float = 0.1, source_type: str = "close", sequential: bool = False) -> Union[
|
||||
@@ -33,9 +33,7 @@ def hwma(candles: np.ndarray, na: float = 0.2, nb: float = 0.1, nc: float = 0.1,
|
||||
candles = slice_candles(candles, sequential)
|
||||
source = get_candle_source(candles, source_type=source_type)
|
||||
|
||||
source_without_nan = source[~np.isnan(source)]
|
||||
res = hwma_fast(source_without_nan, na, nb, nc)
|
||||
res = same_length(candles, res)
|
||||
res = hwma_fast(source, na, nb, nc)
|
||||
|
||||
return res if sequential else res[-1]
|
||||
|
||||
|
||||
@@ -32,12 +32,21 @@ def ichimoku_cloud_seq(candles: np.ndarray, conversion_line_period: int = 9, bas
|
||||
|
||||
candles = slice_candles(candles, sequential)
|
||||
|
||||
conversion_line = _line_helper(candles, conversion_line_period)
|
||||
base_line = _line_helper(candles, base_line_period)
|
||||
span_b_pre = _line_helper(candles, lagging_line_period)
|
||||
small_ph = talib.MAX(candles[:, 3], conversion_line_period)
|
||||
small_pl = talib.MIN(candles[:, 4], conversion_line_period)
|
||||
conversion_line = (small_ph + small_pl) / 2
|
||||
|
||||
mid_ph = talib.MAX(candles[:, 3], base_line_period)
|
||||
mid_pl = talib.MIN(candles[:, 4], base_line_period)
|
||||
base_line = (mid_ph + mid_pl) / 2
|
||||
|
||||
long_ph = talib.MAX(candles[:, 3], lagging_line_period)
|
||||
long_pl = talib.MIN(candles[:, 4], lagging_line_period)
|
||||
span_b_pre = (long_ph + long_pl) / 2
|
||||
span_b = np_shift(span_b_pre, displacement, fill_value=np.nan)
|
||||
span_a_pre = (conversion_line + base_line) / 2
|
||||
span_a = np_shift(span_a_pre, displacement, fill_value=np.nan)
|
||||
|
||||
lagging_line = np_shift(candles[:, 2], displacement - 1, fill_value=np.nan)
|
||||
|
||||
if sequential:
|
||||
@@ -45,8 +54,3 @@ def ichimoku_cloud_seq(candles: np.ndarray, conversion_line_period: int = 9, bas
|
||||
else:
|
||||
return IchimokuCloud(conversion_line[-1], base_line[-1], span_a[-1], span_b[-1], lagging_line[-1],
|
||||
span_a_pre[-1], span_b_pre[-1])
|
||||
|
||||
def _line_helper(candles, period):
|
||||
small_ph = talib.MAX(candles[:, 3], period)
|
||||
small_pl = talib.MIN(candles[:, 4], period)
|
||||
return (small_ph + small_pl) / 2
|
||||
|
||||
@@ -33,7 +33,7 @@ def jma(candles: np.ndarray, period:int=7, phase:float=50, power:int=2, source_t
|
||||
|
||||
@njit
|
||||
def jma_helper(src, phaseRatio, beta, alpha):
|
||||
jma_val = np.copy(src)
|
||||
jma = np.copy(src)
|
||||
|
||||
e0 = np.full_like(src, 0)
|
||||
e1 = np.full_like(src, 0)
|
||||
@@ -42,7 +42,7 @@ def jma_helper(src, phaseRatio, beta, alpha):
|
||||
for i in range(1, src.shape[0]):
|
||||
e0[i] = (1 - alpha) * src[i] + alpha * e0[i-1]
|
||||
e1[i] = (src[i] - e0[i]) * (1 - beta) + beta * e1[i-1]
|
||||
e2[i] = (e0[i] + phaseRatio * e1[i] - jma_val[i - 1]) * pow(1 - alpha, 2) + pow(alpha, 2) * e2[i - 1]
|
||||
jma_val[i] = e2[i] + jma_val[i - 1]
|
||||
e2[i] = (e0[i] + phaseRatio * e1[i] - jma[i-1]) * pow(1 - alpha, 2) + pow(alpha, 2) * e2[i-1]
|
||||
jma[i] = e2[i] + jma[i-1]
|
||||
|
||||
return jma_val
|
||||
return jma
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
from typing import Union
|
||||
|
||||
import numpy as np
|
||||
import talib
|
||||
|
||||
from jesse.indicators.ma import ma
|
||||
from jesse.helpers import slice_candles
|
||||
|
||||
|
||||
def kaufmanstop(candles: np.ndarray, period: int = 22, mult: float = 2, direction: str = "long", matype: int = 0,
|
||||
def kaufmanstop(candles: np.ndarray, period: int = 22, mult: float = 2, direction: str = "long",
|
||||
sequential: bool = False) -> Union[
|
||||
float, np.ndarray]:
|
||||
"""
|
||||
@@ -16,7 +16,6 @@ def kaufmanstop(candles: np.ndarray, period: int = 22, mult: float = 2, directio
|
||||
:param period: int - default: 22
|
||||
:param mult: float - default: 2
|
||||
:param direction: str - default: long
|
||||
:param matype: int - default: 0
|
||||
:param sequential: bool - default: False
|
||||
|
||||
:return: float | np.ndarray
|
||||
@@ -26,7 +25,11 @@ def kaufmanstop(candles: np.ndarray, period: int = 22, mult: float = 2, directio
|
||||
high = candles[:, 3]
|
||||
low = candles[:, 4]
|
||||
|
||||
hl_diff = ma(high - low, period=period, matype=matype, sequential=True)
|
||||
hl_diff = talib.SMA(high - low, period)
|
||||
|
||||
if direction == "long":
|
||||
res = hl_diff * mult - low
|
||||
else:
|
||||
res = hl_diff * mult + high
|
||||
|
||||
res = low - hl_diff * mult if direction == "long" else high + hl_diff * mult
|
||||
return res if sequential else res[-1]
|
||||
|
||||
@@ -23,7 +23,7 @@ def kurtosis(candles: np.ndarray, period: int = 5, source_type: str = "hl2", seq
|
||||
|
||||
source = get_candle_source(candles, source_type=source_type)
|
||||
swv = sliding_window_view(source, window_shape=period)
|
||||
kurtosis_val = stats.kurtosis(swv, axis=-1)
|
||||
res = same_length(source, kurtosis_val)
|
||||
kurtosis = stats.kurtosis(swv, axis=-1)
|
||||
res = same_length(source, kurtosis)
|
||||
|
||||
return res if sequential else res[-1]
|
||||
|
||||
@@ -64,5 +64,9 @@ def lrsi_fast(alpha, candles):
|
||||
else:
|
||||
cd = cd + l3[i] - l2[i]
|
||||
|
||||
rsi[i] = 0 if cu + cd == 0 else cu / (cu + cd)
|
||||
if cu + cd == 0:
|
||||
rsi[i] = 0
|
||||
else:
|
||||
rsi[i] = cu / (cu + cd)
|
||||
|
||||
return rsi
|
||||
|
||||
@@ -12,69 +12,67 @@ def ma(candles: np.ndarray, period: int = 30, matype: int = 0, source_type: str
|
||||
|
||||
:param candles: np.ndarray
|
||||
:param period: int - default: 30
|
||||
:param matype: int - default: 0
|
||||
:param source_type: str - default: "close"
|
||||
:param sequential: bool - default: False
|
||||
|
||||
:return: float | np.ndarray
|
||||
"""
|
||||
|
||||
candles = slice_candles(candles, sequential)
|
||||
# Accept normal array too.
|
||||
if len(candles.shape) == 1:
|
||||
source = candles
|
||||
else:
|
||||
candles = slice_candles(candles, sequential)
|
||||
source = get_candle_source(candles, source_type=source_type)
|
||||
|
||||
if matype <= 8:
|
||||
from talib import MA
|
||||
if len(candles.shape) != 1:
|
||||
candles = get_candle_source(candles, source_type=source_type)
|
||||
res = MA(candles, timeperiod=period, matype=matype)
|
||||
res = MA(source, timeperiod=period, matype=matype)
|
||||
elif matype == 9:
|
||||
from . import fwma
|
||||
res = fwma(candles, period, source_type=source_type, sequential=True)
|
||||
res = fwma(source, period, source_type=source_type, sequential=True)
|
||||
elif matype == 10:
|
||||
from . import hma
|
||||
res = hma(candles, period, source_type=source_type, sequential=True)
|
||||
res = hma(source, period, source_type=source_type, sequential=True)
|
||||
elif matype == 11:
|
||||
from talib import LINEARREG
|
||||
if len(candles.shape) != 1:
|
||||
candles = get_candle_source(candles, source_type=source_type)
|
||||
res = LINEARREG(candles, period)
|
||||
res = LINEARREG(source, period)
|
||||
elif matype == 12:
|
||||
from . import wilders
|
||||
res = wilders(candles, period, source_type=source_type, sequential=True)
|
||||
res = wilders(source, period, source_type=source_type, sequential=True)
|
||||
elif matype == 13:
|
||||
from . import sinwma
|
||||
res = sinwma(candles, period, source_type=source_type, sequential=True)
|
||||
res = sinwma(source, period, source_type=source_type, sequential=True)
|
||||
elif matype == 14:
|
||||
from . import supersmoother
|
||||
res = supersmoother(candles, period, source_type=source_type, sequential=True)
|
||||
res = supersmoother(source, period, source_type=source_type, sequential=True)
|
||||
elif matype == 15:
|
||||
from . import supersmoother_3_pole
|
||||
res = supersmoother_3_pole(candles, period, source_type=source_type, sequential=True)
|
||||
res = supersmoother_3_pole(source, period, source_type=source_type, sequential=True)
|
||||
elif matype == 16:
|
||||
from . import gauss
|
||||
res = gauss(candles, period, source_type=source_type, sequential=True)
|
||||
res = gauss(source, period, source_type=source_type, sequential=True)
|
||||
elif matype == 17:
|
||||
from . import high_pass
|
||||
res = high_pass(candles, period, source_type=source_type, sequential=True)
|
||||
res = high_pass(source, period, source_type=source_type, sequential=True)
|
||||
elif matype == 18:
|
||||
from . import high_pass_2_pole
|
||||
res = high_pass_2_pole(candles, period, source_type=source_type, sequential=True)
|
||||
res = high_pass_2_pole(source, period, source_type=source_type, sequential=True)
|
||||
elif matype == 19:
|
||||
from talib import HT_TRENDLINE
|
||||
if len(candles.shape) != 1:
|
||||
candles = get_candle_source(candles, source_type=source_type)
|
||||
res = HT_TRENDLINE(candles)
|
||||
res = HT_TRENDLINE(source)
|
||||
elif matype == 20:
|
||||
from . import jma
|
||||
res = jma(candles, period, source_type=source_type, sequential=True)
|
||||
res = jma(source, period, source_type=source_type, sequential=True)
|
||||
elif matype == 21:
|
||||
from . import reflex
|
||||
res = reflex(candles, period, source_type=source_type, sequential=True)
|
||||
res = reflex(source, period, source_type=source_type, sequential=True)
|
||||
elif matype == 22:
|
||||
from . import trendflex
|
||||
res = trendflex(candles, period, source_type=source_type, sequential=True)
|
||||
res = trendflex(source, period, source_type=source_type, sequential=True)
|
||||
elif matype == 23:
|
||||
from . import smma
|
||||
res = smma(candles, period, source_type=source_type, sequential=True)
|
||||
res = smma(source, period, source_type=source_type, sequential=True)
|
||||
elif matype == 24:
|
||||
if len(candles.shape) == 1:
|
||||
raise ValueError("vwma only works with normal candles.")
|
||||
@@ -82,50 +80,50 @@ def ma(candles: np.ndarray, period: int = 30, matype: int = 0, source_type: str
|
||||
res = vwma(candles, period, source_type=source_type, sequential=True)
|
||||
elif matype == 25:
|
||||
from . import pwma
|
||||
res = pwma(candles, period, source_type=source_type, sequential=True)
|
||||
res = pwma(source, period, source_type=source_type, sequential=True)
|
||||
elif matype == 26:
|
||||
from . import swma
|
||||
res = swma(candles, period, source_type=source_type, sequential=True)
|
||||
res = swma(source, period, source_type=source_type, sequential=True)
|
||||
elif matype == 27:
|
||||
from . import alma
|
||||
res = alma(candles, period, source_type=source_type, sequential=True)
|
||||
res = alma(source, period, source_type=source_type, sequential=True)
|
||||
elif matype == 28:
|
||||
from . import hwma
|
||||
res = hwma(candles, source_type=source_type, sequential=True)
|
||||
res = hwma(source, source_type=source_type, sequential=True)
|
||||
elif matype == 29:
|
||||
from . import vwap
|
||||
if len(candles.shape) == 1:
|
||||
raise ValueError("vwap only works with normal candles.")
|
||||
res = vwap(candles, source_type=source_type, sequential=True)
|
||||
res = vwap(source, source_type=source_type, sequential=True)
|
||||
elif matype == 30:
|
||||
from . import nma
|
||||
res = nma(candles, period, source_type=source_type, sequential=True)
|
||||
res = nma(source, period, source_type=source_type, sequential=True)
|
||||
elif matype == 31:
|
||||
from . import edcf
|
||||
res = edcf(candles, period, source_type=source_type, sequential=True)
|
||||
res = edcf(source, period, source_type=source_type, sequential=True)
|
||||
elif matype == 32:
|
||||
from . import mwdx
|
||||
res = mwdx(candles, source_type=source_type, sequential=True)
|
||||
res = mwdx(source, source_type=source_type, sequential=True)
|
||||
elif matype == 33:
|
||||
from . import maaq
|
||||
res = maaq(candles, period, source_type=source_type, sequential=True)
|
||||
res = maaq(source, period, source_type=source_type, sequential=True)
|
||||
elif matype == 34:
|
||||
from . import srwma
|
||||
res = srwma(candles, period, source_type=source_type, sequential=True)
|
||||
res = srwma(source, period, source_type=source_type, sequential=True)
|
||||
elif matype == 35:
|
||||
from . import sqwma
|
||||
res = sqwma(candles, period, source_type=source_type, sequential=True)
|
||||
res = sqwma(source, period, source_type=source_type, sequential=True)
|
||||
elif matype == 36:
|
||||
from . import vpwma
|
||||
res = vpwma(candles, period, source_type=source_type, sequential=True)
|
||||
res = vpwma(source, period, source_type=source_type, sequential=True)
|
||||
elif matype == 37:
|
||||
from . import cwma
|
||||
res = cwma(candles, period, source_type=source_type, sequential=True)
|
||||
res = cwma(source, period, source_type=source_type, sequential=True)
|
||||
elif matype == 38:
from . import jsa
res = jsa(candles, period, source_type=source_type, sequential=True)
res = jsa(source, period, source_type=source_type, sequential=True)
elif matype == 39:
from . import epma
res = epma(candles, period, source_type=source_type, sequential=True)
res = epma(source, period, source_type=source_type, sequential=True)

return res if sequential else res[-1]
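The hunks above change the ma() dispatcher so that most moving-average types work on a pre-extracted source series instead of the raw candle matrix. A minimal usage sketch (assuming jesse's documented `import jesse.indicators as ta` convention; the matype codes are the ones listed in the hunks, and the candle column layout `[timestamp, open, close, high, low, volume]` is the one the other hunks index into):

```python
import numpy as np
import jesse.indicators as ta  # assumption: indicators package exposed as in jesse's docs

# candles: jesse candle matrix of shape (N, 6) -> [timestamp, open, close, high, low, volume]
candles = np.random.rand(200, 6)

jma_last = ta.ma(candles, period=30, matype=20)                      # JMA, last value only
reflex_seq = ta.ma(candles, period=30, matype=21, sequential=True)   # Reflex, full series
# matype 24 (VWMA) and 29 (VWAP) need volume, so they only accept full candle matrices,
# which is why the dispatcher raises ValueError for 1-dimensional inputs.
```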
@@ -7,7 +7,7 @@ try:
except ImportError:
njit = lambda a : a

from jesse.helpers import get_candle_source, slice_candles, np_shift, same_length
from jesse.helpers import get_candle_source, slice_candles, same_length, np_shift


def maaq(candles: np.ndarray, period: int = 11, fast_period: int = 2, slow_period: int = 30, source_type: str = "close", sequential: bool = False) -> Union[
@@ -16,7 +16,7 @@ def maaq(candles: np.ndarray, period: int = 11, fast_period: int = 2, slow_perio
Moving Average Adaptive Q

:param candles: np.ndarray
:param period: int - default: 11
:param period: int - default: 14
:param fast_period: int - default: 2
:param slow_period: int - default: 30
:param source_type: str - default: "close"
@@ -32,8 +32,6 @@ def maaq(candles: np.ndarray, period: int = 11, fast_period: int = 2, slow_perio
candles = slice_candles(candles, sequential)
source = get_candle_source(candles, source_type=source_type)

source = source[~np.isnan(source)]

diff = np.abs(source - np_shift(source, 1, np.nan))
signal = np.abs(source - np_shift(source, period, np.nan))
noise = talib.SUM(diff, period)
@@ -46,7 +44,6 @@ def maaq(candles: np.ndarray, period: int = 11, fast_period: int = 2, slow_perio
temp = np.power((ratio * fastSc) + slowSc, 2)

res = maaq_fast(source, temp, period)
res = same_length(candles, res)

return res if sequential else res[-1]
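For context, the efficiency-ratio machinery visible in the maaq hunk works like KAMA's: the signal/noise ratio blends a fast and a slow smoothing constant, and the blend is squared before feeding the recursive filter. A rough sketch of that step; the exact fastSc/slowSc formulas are not shown in the hunk, so the classic `2 / (period + 1)` EMA constants are an assumption, and edge handling is simplified:

```python
import numpy as np

def adaptive_alpha(source: np.ndarray, period: int, fast_period: int = 2, slow_period: int = 30) -> np.ndarray:
    # signal: net move over `period` bars; noise: sum of absolute bar-to-bar moves
    diff = np.abs(np.diff(source, prepend=source[0]))
    signal = np.abs(source - np.roll(source, period))          # wraps at the start (sketch only)
    noise = np.convolve(diff, np.ones(period), mode="full")[:len(source)]
    ratio = np.divide(signal, noise, out=np.zeros_like(signal), where=noise != 0)

    fast_sc = 2 / (fast_period + 1)   # assumption: usual EMA smoothing constants
    slow_sc = 2 / (slow_period + 1)
    return (ratio * fast_sc + slow_sc) ** 2   # corresponds to `temp` in the hunk above
```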
@@ -27,10 +27,10 @@ def macd(candles: np.ndarray, fast_period: int = 12, slow_period: int = 26, sign
candles = slice_candles(candles, sequential)

source = get_candle_source(candles, source_type=source_type)
macd_val, macdsignal, macdhist = talib.MACD(source, fastperiod=fast_period, slowperiod=slow_period,
signalperiod=signal_period)
macd, macdsignal, macdhist = talib.MACD(source, fastperiod=fast_period, slowperiod=slow_period,
signalperiod=signal_period)

if sequential:
return MACD(macd_val, macdsignal, macdhist)
return MACD(macd, macdsignal, macdhist)
else:
return MACD(macd_val[-1], macdsignal[-1], macdhist[-1])
return MACD(macd[-1], macdsignal[-1], macdhist[-1])
@@ -2,7 +2,8 @@ from collections import namedtuple
|
||||
|
||||
import numpy as np
|
||||
|
||||
from jesse.helpers import get_candle_source, slice_candles, same_length
|
||||
from jesse.helpers import get_candle_source
|
||||
from jesse.helpers import slice_candles
|
||||
from jesse.indicators.ma import ma
|
||||
|
||||
MACDEXT = namedtuple('MACDEXT', ['macd', 'signal', 'hist'])
|
||||
@@ -16,11 +17,11 @@ def macdext(candles: np.ndarray, fast_period: int = 12, fast_matype: int = 0, sl
|
||||
|
||||
:param candles: np.ndarray
|
||||
:param fast_period: int - default: 12
|
||||
:param fast_matype: int - default: 0
|
||||
:param fastmatype: int - default: 0
|
||||
:param slow_period: int - default: 26
|
||||
:param slow_matype: int - default: 0
|
||||
:param slowmatype: int - default: 0
|
||||
:param signal_period: int - default: 9
|
||||
:param signal_matype: int - default: 0
|
||||
:param signalmatype: int - default: 0
|
||||
:param source_type: str - default: "close"
|
||||
:param sequential: bool - default: False
|
||||
|
||||
@@ -28,23 +29,10 @@ def macdext(candles: np.ndarray, fast_period: int = 12, fast_matype: int = 0, sl
"""
candles = slice_candles(candles, sequential)

if fast_matype == 29 or slow_matype == 29 or signal_matype == 29:
raise ValueError("VWAP not supported in macdext.")
source = get_candle_source(candles, source_type=source_type)

ma_fast = ma(candles, period=fast_period, matype=fast_matype, source_type=source_type, sequential=True)
ma_slow = ma(candles, period=slow_period, matype=slow_matype, source_type=source_type, sequential=True)
macd = ma_fast - ma_slow

if signal_matype == 24:
# volume needed.
candles[:, 2] = macd
candles_without_nan = candles[~np.isnan(candles).any(axis=1)]
macdsignal = ma(candles_without_nan, period=signal_period, matype=signal_matype, source_type="close", sequential=True)
else:
macd_without_nan = macd[~np.isnan(macd)]
macdsignal = ma(macd_without_nan, period=signal_period, matype=signal_matype, sequential=True)

macdsignal = same_length(candles, macdsignal)
macd = ma(source, period=fast_period, matype=fast_matype, sequential=True) - ma(source, period=slow_period, matype=slow_matype, sequential=True)
macdsignal = ma(macd, period=signal_period, matype=signal_matype, sequential=True)
macdhist = macd - macdsignal

if sequential:
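The rewritten body reduces MACDEXT to three calls to the generic ma() helper: fast MA minus slow MA, a signal MA over that difference, and the histogram as their gap. A self-contained sketch of the same pipeline, using a plain SMA as a stand-in for matype 0 (an assumption; the real code dispatches through ma()):

```python
import numpy as np

def simple_ma(x: np.ndarray, period: int) -> np.ndarray:
    # SMA stand-in for ma(..., matype=0); NaN-padded so the output keeps the input length
    out = np.full_like(x, np.nan, dtype=float)
    kernel = np.ones(period) / period
    out[period - 1:] = np.convolve(x, kernel, mode="valid")
    return out

def macdext_sketch(source, fast_period=12, slow_period=26, signal_period=9):
    macd = simple_ma(source, fast_period) - simple_ma(source, slow_period)
    macdsignal = simple_ma(macd, signal_period)   # leading NaNs simply propagate
    macdhist = macd - macdsignal
    return macd, macdsignal, macdhist
```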
@@ -28,9 +28,9 @@ def mama(candles: np.ndarray, fastlimit: float = 0.5, slowlimit: float = 0.05, s
|
||||
candles = slice_candles(candles, sequential)
|
||||
source = get_candle_source(candles, source_type=source_type)
|
||||
|
||||
mama_val, fama = talib.MAMA(source, fastlimit=fastlimit, slowlimit=slowlimit)
|
||||
mama, fama = talib.MAMA(source, fastlimit=fastlimit, slowlimit=slowlimit)
|
||||
|
||||
if sequential:
|
||||
return MAMA(mama_val, fama)
|
||||
return MAMA(mama, fama)
|
||||
else:
|
||||
return MAMA(mama_val[-1], fama[-1])
|
||||
return MAMA(mama[-1], fama[-1])
|
||||
|
||||
@@ -18,7 +18,6 @@ def mcginley_dynamic(candles: np.ndarray, period: int = 10, k: float = 0.6, sour
|
||||
:param candles: np.ndarray
|
||||
:param period: int - default: 10
|
||||
:param k: float - default: 0.6
|
||||
:param source_type: str - default: "close"
|
||||
:param sequential: bool - default: False
|
||||
|
||||
:return: float | np.ndarray
|
||||
|
||||
@@ -40,4 +40,4 @@ def minmax(candles: np.ndarray, order: int = 3, sequential: bool = False) -> EXT
if sequential:
return EXTREMA(is_min, is_max, last_min, last_max)
else:
return EXTREMA(is_min[-(order+1)], is_max[-(order+1)], last_min[-1], last_max[-1])
return EXTREMA(is_min[-1], is_max[-1], last_min[-1], last_max[-1])
@@ -16,7 +16,7 @@ def mwdx(candles: np.ndarray, factor: float = 0.2, source_type: str = "close", s
|
||||
MWDX Average
|
||||
|
||||
:param candles: np.ndarray
|
||||
:param factor: float - default: 0.2
|
||||
:param factor: float - default: 14
|
||||
:param source_type: str - default: "close"
|
||||
:param sequential: bool - default: False
|
||||
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
|
||||
import numpy as np
|
||||
|
||||
from collections import namedtuple
|
||||
@@ -37,7 +38,7 @@ def pma(candles: np.ndarray, source_type: str = "hl2", sequential: bool = False)
|
||||
return PMA(predict[-1], trigger[-1])
|
||||
|
||||
|
||||
@njit
|
||||
|
||||
def pma_fast(source):
|
||||
predict = np.full_like(source, np.nan)
|
||||
trigger = np.full_like(source, np.nan)
|
||||
|
||||
@@ -1,3 +1,4 @@

from typing import Union
from functools import reduce
from operator import mul
@@ -44,9 +45,11 @@ def pascals_triangle(n: int = None) -> np.ndarray:
n = int(np.fabs(n)) if n is not None else 0

# Calculation
triangle = np.array([combination(n=n, r=i) for i in range(n + 1)])
triangle = np.array([combination(n=n, r=i) for i in range(0, n + 1)])
triangle_sum = np.sum(triangle)
return triangle / triangle_sum
triangle_weights = triangle / triangle_sum

return triangle_weights


def combination(n, r) -> int:
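As a quick sanity check of the weights this helper produces (math.comb is used here purely for illustration; the module's own combination() is what the code calls):

```python
from math import comb
import numpy as np

n = 4
triangle = np.array([comb(n, r) for r in range(n + 1)])   # [1, 4, 6, 4, 1]
weights = triangle / triangle.sum()                       # [0.0625, 0.25, 0.375, 0.25, 0.0625]
assert np.isclose(weights.sum(), 1.0)                     # weights always sum to 1
```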
@@ -45,13 +45,13 @@ def reflex_fast(ssf, period):
ms = np.full_like(ssf, 0)
sums = np.full_like(ssf, 0)
for i in range(ssf.shape[0]):
if i >= period:
if not (i < period):
slope = (ssf[i - period] - ssf[i]) / period
my_sum = 0
sum = 0
for t in range(1, period + 1):
my_sum = my_sum + (ssf[i] + t * slope) - ssf[i - t]
my_sum /= period
sums[i] = my_sum
sum = sum + (ssf[i] + t * slope) - ssf[i - t]
sum = sum / period
sums[i] = sum

ms[i] = 0.04 * sums[i] * sums[i] + 0.96 * ms[i - 1]
if ms[i] > 0:
@@ -1,63 +0,0 @@
import numpy as np
from typing import Union

try:
from numba import njit, guvectorize
except ImportError:
njit = lambda a: a

from jesse.helpers import get_candle_source, slice_candles


def rma(candles: np.ndarray, length: int = 14, source_type="close", sequential=False) -> \
Union[float, np.ndarray]:
"""
Moving average used in RSI. It is the exponentially weighted moving average with alpha = 1 / length.
RETURNS Exponential moving average of x with alpha = 1 / y.
https://www.tradingview.com/pine-script-reference/#fun_rma

:param candles: np.ndarray
:param length: int - default: 14
:param source_type: str - default: close
:param sequential: bool - default: False
:return: Union[float, np.ndarray]
"""

if length < 1:
raise ValueError('Bad parameters.')

# Accept normal array too.
if len(candles.shape) == 1:
source = candles
else:
candles = slice_candles(candles, sequential)
source = get_candle_source(candles, source_type=source_type)

res = rma_fast(source, length)
return res if sequential else res[-1]


@njit
def rma_fast(source, _length):
alpha = 1 / _length
newseries = np.copy(source)
out = np.full_like(source, np.nan)
for i in range(source.size):
if np.isnan(newseries[i - 1]):
# Sma in Numba
asum = 0.0
count = 0
for i in range(_length):
asum += source[i]
count += 1
out[i] = asum / count
for i in range(_length, len(source)):
asum += source[i] - source[i - _length]
out[i] = asum / count
newseries[i] = out[-1]
else:
prev = newseries[i - 1]
if np.isnan(prev):
prev = 0
newseries[i] = alpha * source[i] + (1 - alpha) * prev
return newseries
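The docstring above describes RMA as an EWM with alpha = 1/length, seeded from an SMA of the first `length` values. A simplified comparison sketch of that idea (pandas is used only for the comparison here and is an assumption, not something this module imports; the recursion below is a condensed, Wilder-style version, not a copy of rma_fast):

```python
import numpy as np
import pandas as pd

length = 14
x = np.random.rand(200) * 100

# plain EWM with alpha = 1/length, seeded from the first value
ewm = pd.Series(x).ewm(alpha=1 / length, adjust=False).mean().to_numpy()

# Wilder-style seeding: start from the SMA of the first `length` values, then recurse
rma = np.empty_like(x)
rma[:length] = x[:length].mean()
for i in range(length, len(x)):
    rma[i] = (x[i] - rma[i - 1]) / length + rma[i - 1]

# the seeding differs, but the two series converge quickly
assert np.allclose(rma[-1], ewm[-1], rtol=1e-2)
```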
@@ -13,8 +13,7 @@ def roofing(candles: np.ndarray, hp_period: int = 48, lp_period: int = 10, sourc
|
||||
Roofing Filter indicator by John F. Ehlers
|
||||
|
||||
:param candles: np.ndarray
|
||||
:param hp_period: int - default: 48
|
||||
:param lp_period: int - default: 10
|
||||
:param period: int - default: 20
|
||||
:param source_type: str - default: "close"
|
||||
:param sequential: bool - default: False
|
||||
|
||||
|
||||
@@ -17,11 +17,7 @@ def rsmk(candles: np.ndarray, candles_compare: np.ndarray, lookback: int = 90, p
|
||||
|
||||
:param candles: np.ndarray
|
||||
:param candles_compare: np.ndarray
|
||||
:param lookback: int - default: 90
|
||||
:param period: int - default: 3
|
||||
:param signal_period: int - default: 20
|
||||
:param matype: int - default: 1
|
||||
:param signal_matype: int - default: 1
|
||||
:param source_type: str - default: "close"
|
||||
:param sequential: bool - default: False
|
||||
|
||||
|
||||
@@ -16,7 +16,6 @@ def rsx(candles: np.ndarray, period: int = 14, source_type: str = "close", seque
|
||||
|
||||
:param candles: np.ndarray
|
||||
:param period: int - default: 14
|
||||
:param source_type: str - default: "close"
|
||||
:param sequential: bool - default: False
|
||||
|
||||
:return: float | np.ndarray
|
||||
@@ -34,7 +33,7 @@ def rsx_fast(source, period):
|
||||
# variables
|
||||
f0 = 0
|
||||
f8 = 0
|
||||
# f10 = 0
|
||||
f10 = 0
|
||||
f18 = 0
|
||||
f20 = 0
|
||||
f28 = 0
|
||||
@@ -52,15 +51,15 @@ def rsx_fast(source, period):
|
||||
f88 = 0
|
||||
f90 = 0
|
||||
|
||||
# v4 = 0
|
||||
# v8 = 0
|
||||
# v10 = 0
|
||||
v4 = 0
|
||||
v8 = 0
|
||||
v10 = 0
|
||||
v14 = 0
|
||||
# v18 = 0
|
||||
v18 = 0
|
||||
v20 = 0
|
||||
|
||||
# vC = 0
|
||||
# v1C = 0
|
||||
vC = 0
|
||||
v1C = 0
|
||||
|
||||
res = np.full_like(source, np.nan)
|
||||
|
||||
@@ -68,12 +67,18 @@ def rsx_fast(source, period):
|
||||
if f90 == 0:
|
||||
f90 = 1.0
|
||||
f0 = 0.0
|
||||
f88 = period - 1.0 if period >= 6 else 5.0
|
||||
if period - 1.0 >= 5:
|
||||
f88 = period - 1.0
|
||||
else:
|
||||
f88 = 5.0
|
||||
f8 = 100.0 * source[i]
|
||||
f18 = 3.0 / (period + 2.0)
|
||||
f20 = 1.0 - f18
|
||||
else:
|
||||
f90 = f88 + 1 if f88 <= f90 else f90 + 1
|
||||
if f88 <= f90:
|
||||
f90 = f88 + 1
|
||||
else:
|
||||
f90 = f90 + 1
|
||||
f10 = f8
|
||||
f8 = 100 * source[i]
|
||||
v8 = f8 - f10
|
||||
@@ -101,8 +106,10 @@ def rsx_fast(source, period):
|
||||
f90 = 0.0
|
||||
if f88 < f90 and v20 > 0.0000000001:
|
||||
v4 = (v14 / v20 + 1.0) * 50.0
|
||||
v4 = min(v4, 100.0)
|
||||
v4 = max(v4, 0.0)
|
||||
if v4 > 100.0:
|
||||
v4 = 100.0
|
||||
if v4 < 0.0:
|
||||
v4 = 0.0
|
||||
else:
|
||||
v4 = 50.0
|
||||
res[i] = v4
|
||||
|
||||
@@ -17,7 +17,6 @@ def rvi(candles: np.ndarray, period: int = 10, ma_len: int = 14, matype: int = 1
|
||||
:param period: int - default: 10
|
||||
:param ma_len: int - default: 14
|
||||
:param matype: int - default: 1
|
||||
:param devtype: int - default: 0
|
||||
:param source_type: str - default: "close"
|
||||
:param sequential: bool - default: False
|
||||
:return: float | np.ndarray
|
||||
|
||||
@@ -15,13 +15,13 @@ def sarext(candles: np.ndarray, start_value: float = 0, offset_on_reverse: float
|
||||
|
||||
:param candles: np.ndarray
|
||||
:param start_value: float - default: 0
|
||||
:param offset_on_reverse: float - default: 0
|
||||
:param acceleration_init_long: float - default: 0
|
||||
:param acceleration_long: float - default: 0
|
||||
:param acceleration_max_long: float - default: 0
|
||||
:param acceleration_init_short: float - default: 0
|
||||
:param acceleration_short: float - default: 0
|
||||
:param acceleration_max_short: float - default: 0
|
||||
:param offsetonreverse: float - default: 0
|
||||
:param accelerationinitlong: float - default: 0
|
||||
:param accelerationlong: float - default: 0
|
||||
:param accelerationmaxlong: float - default: 0
|
||||
:param accelerationinitshort: float - default: 0
|
||||
:param accelerationshort: float - default: 0
|
||||
:param accelerationmaxshort: float - default: 0
|
||||
:param sequential: bool - default: False
|
||||
|
||||
:return: float | np.ndarray
|
||||
|
||||
@@ -25,10 +25,7 @@ def sinwma(candles: np.ndarray, period: int = 14, source_type: str = "close", se
|
||||
candles = slice_candles(candles, sequential)
|
||||
source = get_candle_source(candles, source_type=source_type)
|
||||
|
||||
sines = np.array(
|
||||
[np.sin((i + 1) * np.pi / (period + 1)) for i in range(period)]
|
||||
)
|
||||
|
||||
sines = np.array([np.sin((i + 1) * np.pi / (period + 1)) for i in range(0, period)])
|
||||
w = sines / sines.sum()
|
||||
swv = sliding_window_view(source, window_shape=period)
|
||||
res = np.average(swv, weights=w, axis=-1)
|
||||
|
||||
@@ -36,11 +36,13 @@ def numpy_ewma(data, window):
:return:
"""
alpha = 1 / window
# scale = 1 / (1 - alpha)
scale = 1 / (1 - alpha)
n = data.shape[0]
scale_arr = (1 - alpha) ** (-1 * np.arange(n))
weights = (1 - alpha) ** np.arange(n)
pw0 = (1 - alpha) ** (n - 1)
mult = data * pw0 * scale_arr
cumsums = mult.cumsum()
return cumsums * scale_arr[::-1] / weights.cumsum()
out = cumsums * scale_arr[::-1] / weights.cumsum()

return out
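The vectorised trick above is easier to trust next to a direct, slow implementation of the same weighting: each output is the average of all past values weighted by (1 - alpha)^(t - i) and normalised by the running weight sum (the role of weights.cumsum()). A small reference sketch, under that reading:

```python
import numpy as np

def ewma_reference(data: np.ndarray, window: int) -> np.ndarray:
    # direct O(n^2) version: w_i = (1 - alpha)^(t - i), normalised per time step
    alpha = 1 / window
    out = np.empty_like(data, dtype=float)
    for t in range(len(data)):
        w = (1 - alpha) ** np.arange(t, -1, -1)   # oldest value gets the smallest weight
        out[t] = np.dot(w, data[:t + 1]) / w.sum()
    return out

data = np.random.rand(50)
ref = ewma_reference(data, 10)
# numpy_ewma(data, 10) from the hunk above should agree with `ref` elementwise:
# np.allclose(numpy_ewma(data, 10), ref)
```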
@@ -39,11 +39,11 @@ def sqwma(candles: np.ndarray, period: int = 14, source_type: str = "close", seq
|
||||
def sqwma_fast(source, period):
|
||||
newseries = np.copy(source)
|
||||
for j in range(period + 1, source.shape[0]):
|
||||
my_sum = 0.0
|
||||
sum = 0.0
|
||||
weightSum = 0.0
|
||||
for i in range(period - 1):
|
||||
weight = np.power(period - i, 2)
|
||||
my_sum += (source[j - i] * weight)
|
||||
sum += (source[j - i] * weight)
|
||||
weightSum += weight
|
||||
newseries[j] = my_sum / weightSum
|
||||
newseries[j] = sum / weightSum
|
||||
return newseries
|
||||
|
||||
@@ -15,7 +15,7 @@ def srsi(candles: np.ndarray, period: int = 14, period_stoch: int = 14, k: int =
|
||||
Stochastic RSI
|
||||
|
||||
:param candles: np.ndarray
|
||||
:param period: int - default: 14 - RSI Length
|
||||
:param period_rsi: int - default: 14 - RSI Length
|
||||
:param period_stoch: int - default: 14 - Stochastic Length
|
||||
:param k: int - default: 3
|
||||
:param d: int - default: 3
|
||||
|
||||
@@ -39,11 +39,11 @@ def srwma(candles: np.ndarray, period: int = 14, source_type: str = "close", seq
|
||||
def srwma_fast(source, period):
|
||||
newseries = np.copy(source)
|
||||
for j in range(period + 1, source.shape[0]):
|
||||
my_sum = 0.0
|
||||
sum = 0.0
|
||||
weightSum = 0.0
|
||||
for i in range(period - 1):
|
||||
weight = np.power(period - i, 0.5)
|
||||
my_sum += (source[j - i] * weight)
|
||||
sum += (source[j - i] * weight)
|
||||
weightSum += weight
|
||||
newseries[j] = my_sum / weightSum
|
||||
newseries[j] = sum / weightSum
|
||||
return newseries
|
||||
|
||||
@@ -16,9 +16,9 @@ def stc(candles: np.ndarray, fast_period: int = 23, fast_matype: int = 1, slow_p
|
||||
|
||||
:param candles: np.ndarray
|
||||
:param fast_period: int - default: 23
|
||||
:param fast_matype: int - default: 1
|
||||
:param fastmatype: int - default: 1
|
||||
:param slow_period: int - default: 50
|
||||
:param slow_matype: int - default: 1
|
||||
:param slowmatype: int - default: 1
|
||||
:param k_period: int - default: 10
|
||||
:param d_period: int - default: 3
|
||||
:param source_type: str - default: "close"
|
||||
|
||||
@@ -33,8 +33,8 @@ def stoch(candles: np.ndarray, fastk_period: int = 14, slowk_period: int = 3, sl
|
||||
hh = talib.MAX(candles_high, fastk_period)
|
||||
ll = talib.MIN(candles_low, fastk_period)
|
||||
|
||||
stoch_val = 100 * (candles_close - ll) / (hh - ll)
|
||||
k = ma(stoch_val, period=slowk_period, matype=slowk_matype, sequential=True)
|
||||
stoch = 100 * (candles_close - ll) / (hh - ll)
|
||||
k = ma(stoch, period=slowk_period, matype=slowk_matype, sequential=True)
|
||||
d = ma(k, period=slowd_period, matype=slowd_matype, sequential=True)
|
||||
|
||||
if sequential:
|
||||
|
||||
@@ -15,10 +15,12 @@ SuperTrend = namedtuple('SuperTrend', ['trend', 'changed'])
def supertrend(candles: np.ndarray, period: int = 10, factor: float = 3, sequential: bool = False) -> SuperTrend:
"""
SuperTrend

:param candles: np.ndarray
:param period: int - default=14
:param factor: float - default=3
:param sequential: bool - default=False
:param period: int - default: 14
:param factor: float - default: 3
:param sequential: bool - default: False

:return: SuperTrend(trend, changed)
"""

@@ -42,13 +44,13 @@ def supertrend_fast(candles, atr, factor, period):
lower_basic = (candles[:, 3] + candles[:, 4]) / 2 - (factor * atr)
upper_band = upper_basic
lower_band = lower_basic
super_trend = np.zeros(len(candles))
changed = np.zeros(len(candles))
super_trend = np.zeros(candles.size)
changed = np.zeros(candles.size)

# calculate the bands:
# in an UPTREND, lower band does not decrease
# in a DOWNTREND, upper band does not increase
for i in range(period, len(candles)):
for i in range(period, candles.size):
# if currently in DOWNTREND (i.e. price is below upper band)
prevClose = candles[:, 2][i - 1]
prevUpperBand = upper_band[i - 1]
@@ -71,7 +73,7 @@ def supertrend_fast(candles, atr, factor, period):
super_trend[i - 1] = prevLowerBand
prevSuperTrend = super_trend[i - 1]

for i in range(period, len(candles)):
for i in range(period, candles.size):
prevClose = candles[:, 2][i - 1]
prevUpperBand = upper_band[i - 1]
currUpperBand = upper_band[i]
@@ -94,4 +96,4 @@ def supertrend_fast(candles, atr, factor, period):
else:
super_trend[i] = currUpperBand # switch to DOWNTREND
changed[i] = True
return super_trend, changed
return super_trend, changed
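The comments in supertrend_fast describe the usual SuperTrend band "ratchet": in an uptrend the lower band may only rise, in a downtrend the upper band may only fall. A compact sketch of that rule in isolation, using the standard formulation rather than a copy of the loops above:

```python
import numpy as np

def ratchet_bands(high, low, close, atr, factor=3.0):
    """Standard SuperTrend band rule: the upper band never rises while price stays below it,
    and the lower band never falls while price stays above it."""
    basic_upper = (high + low) / 2 + factor * atr
    basic_lower = (high + low) / 2 - factor * atr
    upper = basic_upper.copy()
    lower = basic_lower.copy()
    for i in range(1, len(close)):
        # keep the previous upper band unless the new basic band is tighter or price broke above it
        if basic_upper[i] < upper[i - 1] or close[i - 1] > upper[i - 1]:
            upper[i] = basic_upper[i]
        else:
            upper[i] = upper[i - 1]
        # mirror image for the lower band
        if basic_lower[i] > lower[i - 1] or close[i - 1] < lower[i - 1]:
            lower[i] = basic_lower[i]
        else:
            lower[i] = lower[i - 1]
    return upper, lower
```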
@@ -1,3 +1,4 @@

from typing import Union
from math import floor
import numpy as np
@@ -48,16 +49,17 @@ def symmetric_triangle(n: int = None) -> np.ndarray:

if n > 2:
if n % 2 == 0:
front = [i + 1 for i in range(floor(n / 2))]
front = [i + 1 for i in range(0, floor(n / 2))]
triangle = front + front[::-1]
else:
front = [i + 1 for i in range(floor(0.5 * (n + 1)))]
front = [i + 1 for i in range(0, floor(0.5 * (n + 1)))]
triangle = front.copy()
front.pop()
triangle += front[::-1]


triangle_sum = np.sum(triangle)
return triangle / triangle_sum
triangle_weights = triangle / triangle_sum
return triangle_weights
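Concretely, for an even n the helper mirrors the whole ramp, and for an odd n it mirrors everything except the peak. After normalisation:

```python
import numpy as np

# n = 4: front = [1, 2] -> triangle [1, 2, 2, 1], sum 6
print(np.array([1, 2, 2, 1]) / 6)      # [0.1667, 0.3333, 0.3333, 0.1667]

# n = 5: front = [1, 2, 3] -> triangle [1, 2, 3, 2, 1], sum 9
print(np.array([1, 2, 3, 2, 1]) / 9)   # [0.1111, 0.2222, 0.3333, 0.2222, 0.1111]
```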
@@ -47,12 +47,12 @@ def trendflex_fast(ssf, period):
|
||||
sums = np.full_like(ssf, 0)
|
||||
|
||||
for i in range(ssf.shape[0]):
|
||||
if i >= period:
|
||||
my_sum = 0
|
||||
if not (i < period):
|
||||
sum = 0
|
||||
for t in range(1, period + 1):
|
||||
my_sum = my_sum + ssf[i] - ssf[i - t]
|
||||
my_sum /= period
|
||||
sums[i] = my_sum
|
||||
sum = sum + ssf[i] - ssf[i - t]
|
||||
sum = sum / period
|
||||
sums[i] = sum
|
||||
|
||||
ms[i] = 0.04 * sums[i] * sums[i] + 0.96 * ms[i - 1]
|
||||
if ms[i] != 0:
|
||||
|
||||
@@ -23,6 +23,6 @@ def var(candles: np.ndarray, period: int = 14, nbdev: float = 1, source_type: st
candles = slice_candles(candles, sequential)

source = get_candle_source(candles, source_type=source_type)
res = talib.VAR(source, timeperiod=period, nbdev=nbdev)
res = talib.VAR(candles[:, 2], timeperiod=period, nbdev=nbdev)

return res if sequential else res[-1]
@@ -62,7 +62,7 @@ def vlma_fast(source, a, b, c, d, min_period, max_period):
|
||||
period = np.zeros_like(source)
|
||||
for i in range(1, source.shape[0]):
|
||||
nz_period = period[i - 1] if period[i - 1] != 0 else max_period
|
||||
period[i] = nz_period + 1 if b[i] <= source[i] <= c[i] else nz_period - 1 if source[i] < a[i] or source[i] > d[i] else nz_period
|
||||
period[i] = nz_period + 1 if source[i] >= b[i] and source[i] <= c[i] else nz_period - 1 if source[i] < a[i] or source[i] > d[i] else nz_period
|
||||
period[i] = max(min(period[i], max_period), min_period)
|
||||
sc = 2 / (period[i] + 1)
|
||||
newseries[i] = (source[i] * sc) + ((1 - sc) * newseries[i - 1])
|
||||
|
||||
@@ -29,12 +29,12 @@ def voss(candles: np.ndarray, period: int = 20, predict: int = 3, bandwith: floa
|
||||
candles = slice_candles(candles, sequential)
|
||||
|
||||
source = get_candle_source(candles, source_type=source_type)
|
||||
voss_val, filt = voss_fast(source, period, predict, bandwith)
|
||||
voss, filt = voss_fast(source, period, predict, bandwith)
|
||||
|
||||
if sequential:
|
||||
return VossFilter(voss_val, filt)
|
||||
return VossFilter(voss, filt)
|
||||
else:
|
||||
return VossFilter(voss_val[-1], filt[-1])
|
||||
return VossFilter(voss[-1], filt[-1])
|
||||
|
||||
|
||||
@njit
|
||||
@@ -50,7 +50,7 @@ def voss_fast(source, period, predict, bandwith):
|
||||
s1 = 1 / g1 - np.sqrt(1 / (g1 * g1) - 1)
|
||||
|
||||
for i in range(source.shape[0]):
|
||||
if i > period and i > 5 and i > order:
|
||||
if not (i <= period or i <= 5 or i <= order):
|
||||
filt[i] = 0.5 * (1 - s1) * (source[i] - source[i - 2]) + f1 * (1 + s1) * filt[i - 1] - s1 * filt[i - 2]
|
||||
|
||||
for i in range(source.shape[0]):
|
||||
@@ -58,5 +58,6 @@ def voss_fast(source, period, predict, bandwith):
|
||||
sumc = 0
|
||||
for count in range(order):
|
||||
sumc = sumc + ((count + 1) / float(order)) * voss[i - (order - count)]
|
||||
|
||||
voss[i] = ((3 + order) / 2) * filt[i] - sumc
|
||||
return voss, filt
|
||||
|
||||
@@ -18,7 +18,7 @@ def vpt(candles: np.ndarray, source_type: str = "close", sequential: bool = Fals
|
||||
candles = slice_candles(candles, sequential)
|
||||
|
||||
source = get_candle_source(candles, source_type=source_type)
|
||||
vpt_val = (candles[:, 5] * ((source - np_shift(source, 1, fill_value=np.nan)) / np_shift(source, 1, fill_value=np.nan)))
|
||||
res = np_shift(vpt_val, 1, fill_value=np.nan) + vpt_val
|
||||
vpt = (candles[:, 5] * ((source - np_shift(source, 1, fill_value=np.nan)) / np_shift(source, 1, fill_value=np.nan)))
|
||||
res = np_shift(vpt, 1, fill_value=np.nan) + vpt
|
||||
|
||||
return res if sequential else res[-1]
|
||||
|
||||
@@ -40,11 +40,11 @@ def vpwma(candles: np.ndarray, period: int = 14, power: float = 0.382, source_ty
|
||||
def vpwma_fast(source, period, power):
|
||||
newseries = np.copy(source)
|
||||
for j in range(period + 1, source.shape[0]):
|
||||
my_sum = 0.0
|
||||
sum = 0.0
|
||||
weightSum = 0.0
|
||||
for i in range(period - 1):
|
||||
weight = np.power(period - i, power)
|
||||
my_sum += (source[j - i] * weight)
|
||||
sum += (source[j - i] * weight)
|
||||
weightSum += weight
|
||||
newseries[j] = my_sum / weightSum
|
||||
newseries[j] = sum / weightSum
|
||||
return newseries
|
||||
|
||||
@@ -18,7 +18,6 @@ def vwap(
|
||||
|
||||
:param candles: np.ndarray
|
||||
:param source_type: str - default: "close"
|
||||
:param anchor: str - default: "D"
|
||||
:param sequential: bool - default: False
|
||||
|
||||
:return: float | np.ndarray
|
||||
|
||||
@@ -25,11 +25,11 @@ def vwmacd(candles: np.ndarray, fast_period: int = 12, slow_period: int = 26, si

vwma_slow = talib.SMA(candles[:, 2] * candles[:, 5], slow_period) / talib.SMA(candles[:, 5], slow_period)
vwma_fast = talib.SMA(candles[:, 2] * candles[:, 5], fast_period) / talib.SMA(candles[:, 5], fast_period)
vwmacd_val = vwma_fast - vwma_slow
signal = talib.EMA(vwmacd_val, signal_period)
hist = vwmacd_val - signal
vwmacd = vwma_fast - vwma_slow
signal = talib.EMA(vwmacd, signal_period)
hist = vwmacd - signal

if sequential:
return VWMACD(vwmacd_val, signal, hist)
return VWMACD(vwmacd, signal, hist)
else:
return VWMACD(vwmacd_val[-1], signal[-1], hist[-1])
return VWMACD(vwmacd[-1], signal[-1], hist[-1])
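The hunk above builds each VWMA as SMA(close * volume) / SMA(volume) over candle columns 2 (close) and 5 (volume). A talib-free sketch of the same construction, for reference:

```python
import numpy as np

def vwma(close: np.ndarray, volume: np.ndarray, period: int) -> np.ndarray:
    # volume-weighted moving average: SMA(close * volume) / SMA(volume)
    kernel = np.ones(period) / period
    num = np.convolve(close * volume, kernel, mode="valid")
    den = np.convolve(volume, kernel, mode="valid")
    out = np.full(len(close), np.nan)
    out[period - 1:] = num / den
    return out

# VWMACD then follows the familiar MACD shape:
# vwmacd = vwma(close, volume, 12) - vwma(close, volume, 26)
# signal = EMA(vwmacd, 9); hist = vwmacd - signal
```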
@@ -1,62 +0,0 @@
from collections import namedtuple

import numpy as np
import talib as ta

from jesse.helpers import get_candle_source, slice_candles

Wavetrend = namedtuple('Wavetrend', ['wt1', 'wt2', 'wtCrossUp', 'wtCrossDown', 'wtOversold', 'wtOverbought', 'wtVwap'])


# Wavetrend indicator ported from: https://www.tradingview.com/script/Msm4SjwI-VuManChu-Cipher-B-Divergences/
# https://www.tradingview.com/script/2KE8wTuF-Indicator-WaveTrend-Oscillator-WT/
#
# buySignal = wtCross and wtCrossUp and wtOversold
# sellSignal = wtCross and wtCrossDown and wtOverbought
#
# See https://github.com/ysdede/lazarus3/blob/partialexit/strategies/lazarus3/__init__.py for working jesse.ai example.


def wt(candles: np.ndarray,
wtchannellen: int = 9,
wtaveragelen: int = 12,
wtmalen: int = 3,
oblevel: int = 53,
oslevel: int = -53,
source_type: str = "hlc3",
sequential: bool = False) -> Wavetrend:
"""
Wavetrend indicator

:param candles: np.ndarray
:param wtchannellen: int - default: 9
:param wtaveragelen: int - default: 12
:param wtmalen: int - default: 3
:param oblevel: int - default: 53
:param oslevel: int - default: -53
:param source_type: str - default: "hlc3"
:param sequential: bool - default: False

:return: Wavetrend
"""
candles = slice_candles(candles, sequential)

src = get_candle_source(candles, source_type=source_type)

# wt
esa = ta.EMA(src, wtchannellen)
de = ta.EMA(abs(src - esa), wtchannellen)
ci = (src - esa) / (0.015 * de)
wt1 = ta.EMA(ci, wtaveragelen)
wt2 = ta.SMA(wt1, wtmalen)

wtVwap = wt1 - wt2
wtOversold = wt2 <= oslevel
wtOverbought = wt2 >= oblevel
wtCrossUp = wt2 - wt1 <= 0
wtCrossDown = wt2 - wt1 >= 0

if sequential:
return Wavetrend(wt1, wt2, wtCrossUp, wtCrossDown, wtOversold, wtOverbought, wtVwap)
else:
return Wavetrend(wt1[-1], wt2[-1], wtCrossUp[-1], wtCrossDown[-1], wtOversold[-1], wtOverbought[-1], wtVwap[-1])
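For readers who still have the indicator available (this compare removes the file on one side of the diff), a usage sketch based on the signature and the buySignal/sellSignal comments shown above; the `jesse.indicators` import path and the random candles are illustrative assumptions:

```python
import numpy as np
import jesse.indicators as ta  # assumption: wt is exposed here in the installed version

candles = np.random.rand(300, 6)  # [timestamp, open, close, high, low, volume]

wt = ta.wt(candles, oblevel=53, oslevel=-53, sequential=False)
buy_signal = wt.wtCrossUp and wt.wtOversold        # loosely mirrors the buySignal comment
sell_signal = wt.wtCrossDown and wt.wtOverbought   # loosely mirrors the sellSignal comment
```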
@@ -19,7 +19,6 @@ def zscore(candles: np.ndarray, period: int = 14, matype: int = 0, nbdev: float
|
||||
:param period: int - default: 14
|
||||
:param matype: int - default: 0
|
||||
:param nbdev: float - default: 1
|
||||
:param devtype: int - default: 0
|
||||
:param source_type: str - default: "close"
|
||||
:param sequential: bool - default: False
|
||||
|
||||
|
||||
@@ -1,14 +0,0 @@
|
||||
import numpy as np
|
||||
import simplejson as json
|
||||
|
||||
|
||||
class NpEncoder(json.JSONEncoder):
|
||||
def default(self, obj):
|
||||
if isinstance(obj, np.integer):
|
||||
return int(obj)
|
||||
elif isinstance(obj, np.floating):
|
||||
return float(obj)
|
||||
elif isinstance(obj, np.ndarray):
|
||||
return obj.tolist()
|
||||
else:
|
||||
return super(NpEncoder, self).default(obj)
|
||||
@@ -11,17 +11,17 @@ class DynamicNumpyArray:
|
||||
allocation every N number. Hence, it's both fast and dynamic.
|
||||
"""
|
||||
|
||||
def __init__(self, shape: tuple, drop_at: int = None):
|
||||
def __init__(self, shape: tuple, drop_at=None):
|
||||
self.index = -1
|
||||
self.array = np.zeros(shape)
|
||||
self.bucket_size = shape[0]
|
||||
self.shape = shape
|
||||
self.drop_at = drop_at
|
||||
|
||||
def __str__(self) -> str:
|
||||
def __str__(self):
|
||||
return str(self.array[:self.index + 1])
|
||||
|
||||
def __len__(self) -> int:
|
||||
def __len__(self):
|
||||
return self.index + 1
|
||||
|
||||
def __getitem__(self, i):
|
||||
@@ -31,7 +31,9 @@ class DynamicNumpyArray:
|
||||
|
||||
if stop < 0:
|
||||
stop = (self.index + 1) - abs(stop)
|
||||
stop = min(stop, self.index + 1)
|
||||
if stop > (self.index + 1):
|
||||
stop = self.index + 1
|
||||
|
||||
return self.array[start:stop]
|
||||
else:
|
||||
if i < 0:
|
||||
@@ -43,7 +45,7 @@ class DynamicNumpyArray:
|
||||
|
||||
return self.array[i]
|
||||
|
||||
def __setitem__(self, i, item) -> None:
|
||||
def __setitem__(self, i, item):
|
||||
if i < 0:
|
||||
i = (self.index + 1) - abs(i)
|
||||
|
||||
@@ -53,7 +55,7 @@ class DynamicNumpyArray:
|
||||
|
||||
self.array[i] = item
|
||||
|
||||
def append(self, item: np.ndarray) -> None:
|
||||
def append(self, item: np.ndarray):
|
||||
self.index += 1
|
||||
|
||||
# expand if the arr is almost full
|
||||
@@ -62,14 +64,11 @@ class DynamicNumpyArray:
self.array = np.concatenate((self.array, new_bucket), axis=0)

# drop N% of the beginning values to free memory
if (
self.drop_at is not None
and self.index != 0
and (self.index + 1) % self.drop_at == 0
):
shift_num = int(self.drop_at / 2)
self.index -= shift_num
self.array = np_shift(self.array, -shift_num)
if self.drop_at is not None:
if self.index != 0 and (self.index + 1) % self.drop_at == 0:
shift_num = int(self.drop_at / 2)
self.index -= shift_num
self.array = np_shift(self.array, -shift_num)

self.array[self.index] = item

@@ -80,7 +79,7 @@ class DynamicNumpyArray:

return self.array[self.index]

def get_past_item(self, past_index) -> np.ndarray:
def get_past_item(self, past_index):
# validation
if self.index == -1:
raise IndexError('list assignment index out of range')
@@ -90,7 +89,7 @@ class DynamicNumpyArray:

return self.array[self.index - past_index]

def flush(self) -> None:
def flush(self):
self.index = -1
self.array = np.zeros(self.shape)
self.bucket_size = self.shape[0]
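A short usage sketch of the DynamicNumpyArray container whose hunks end here: it pre-allocates a bucket, grows by whole buckets, and (optionally) drops the oldest half every `drop_at` appends. The import path is an assumption about the installed version:

```python
import numpy as np
from jesse.libs import DynamicNumpyArray  # assumption: this is where the class is exposed

arr = DynamicNumpyArray((10, 6), drop_at=120)  # (bucket_rows, columns); drop_at is optional

for i in range(25):
    arr.append(np.array([i, 0, 0, 0, 0, 0]))

print(len(arr))        # 25 -- logical length, even though the first bucket held only 10 rows
print(arr[-1][0])      # 24.0 -- negative indexing works like a list
print(arr[0:3].shape)  # (3, 6) -- slicing returns a view of the underlying array
arr.flush()            # reset back to an empty pre-allocated buffer
```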
@@ -1,9 +1,7 @@
|
||||
import peewee
|
||||
from jesse.services.db import database
|
||||
|
||||
|
||||
if database.is_closed():
|
||||
database.open_connection()
|
||||
import jesse.helpers as jh
|
||||
from jesse.services.db import db
|
||||
|
||||
|
||||
class Candle(peewee.Model):
|
||||
@@ -21,23 +19,21 @@ class Candle(peewee.Model):
|
||||
is_partial = True
|
||||
|
||||
class Meta:
|
||||
from jesse.services.db import database
|
||||
|
||||
database = database.db
|
||||
database = db
|
||||
indexes = (
|
||||
(('timestamp', 'exchange', 'symbol'), True),
|
||||
)
|
||||
|
||||
def __init__(self, attributes: dict = None, **kwargs) -> None:
|
||||
def __init__(self, attributes=None, **kwargs) -> None:
|
||||
peewee.Model.__init__(self, attributes=attributes, **kwargs)
|
||||
|
||||
if attributes is None:
|
||||
attributes = {}
|
||||
|
||||
for a, value in attributes.items():
|
||||
setattr(self, a, value)
|
||||
for a in attributes:
|
||||
setattr(self, a, attributes[a])
|
||||
|
||||
|
||||
# if database is open, create the table
|
||||
if database.is_open():
|
||||
if not jh.is_unit_testing():
|
||||
# create the table
|
||||
Candle.create_table()
|
||||
|
||||
@@ -3,11 +3,7 @@ import peewee
|
||||
|
||||
import jesse.helpers as jh
|
||||
from jesse.config import config
|
||||
from jesse.services.db import database
|
||||
|
||||
|
||||
if database.is_closed():
|
||||
database.open_connection()
|
||||
from jesse.services.db import db
|
||||
|
||||
|
||||
class CompletedTrade(peewee.Model):
|
||||
@@ -33,22 +29,22 @@ class CompletedTrade(peewee.Model):
|
||||
orders = []
|
||||
|
||||
class Meta:
|
||||
from jesse.services.db import database
|
||||
|
||||
database = database.db
|
||||
database = db
|
||||
indexes = ((('strategy_name', 'exchange', 'symbol'), False),)
|
||||
|
||||
def __init__(self, attributes: dict = None, **kwargs) -> None:
|
||||
def __init__(self, attributes=None, **kwargs) -> None:
|
||||
peewee.Model.__init__(self, attributes=attributes, **kwargs)
|
||||
|
||||
if attributes is None:
|
||||
attributes = {}
|
||||
|
||||
for a, value in attributes.items():
|
||||
setattr(self, a, value)
|
||||
for a in attributes:
|
||||
setattr(self, a, attributes[a])
|
||||
|
||||
def toJSON(self) -> dict:
|
||||
orders = [o.__dict__ for o in self.orders]
|
||||
orders = []
|
||||
for o in self.orders:
|
||||
orders.append(o.__dict__)
|
||||
return {
|
||||
"id": self.id,
|
||||
"strategy_name": self.strategy_name,
|
||||
@@ -57,9 +53,15 @@ class CompletedTrade(peewee.Model):
|
||||
"type": self.type,
|
||||
"entry_price": self.entry_price,
|
||||
"exit_price": self.exit_price,
|
||||
"take_profit_at": self.take_profit_at,
|
||||
"stop_loss_at": self.stop_loss_at,
|
||||
"qty": self.qty,
|
||||
"fee": self.fee,
|
||||
"reward": self.reward,
|
||||
"size": self.size,
|
||||
"risk": self.risk,
|
||||
"risk_percentage": self.risk_percentage,
|
||||
"R": self.r,
|
||||
"PNL": self.pnl,
|
||||
"PNL_percentage": self.pnl_percentage,
|
||||
"holding_period": self.holding_period,
|
||||
@@ -79,13 +81,19 @@ class CompletedTrade(peewee.Model):
|
||||
'type': self.type,
|
||||
'entry_price': self.entry_price,
|
||||
'exit_price': self.exit_price,
|
||||
'take_profit_at': self.take_profit_at,
|
||||
'stop_loss_at': self.stop_loss_at,
|
||||
'qty': self.qty,
|
||||
'opened_at': self.opened_at,
|
||||
'closed_at': self.closed_at,
|
||||
'entry_candle_timestamp': self.entry_candle_timestamp,
|
||||
'exit_candle_timestamp': self.exit_candle_timestamp,
|
||||
"fee": self.fee,
|
||||
"reward": self.reward,
|
||||
"size": self.size,
|
||||
"risk": self.risk,
|
||||
"risk_percentage": self.risk_percentage,
|
||||
"R": self.r,
|
||||
"PNL": self.pnl,
|
||||
"PNL_percentage": self.pnl_percentage,
|
||||
"holding_period": self.holding_period,
|
||||
@@ -96,10 +104,31 @@ class CompletedTrade(peewee.Model):
trading_fee = jh.get_config(f'env.exchanges.{self.exchange}.fee')
return trading_fee * self.qty * (self.entry_price + self.exit_price)

@property
def reward(self) -> float:
return abs(self.take_profit_at - self.entry_price) * self.qty

@property
def size(self) -> float:
return self.qty * self.entry_price

@property
def risk(self) -> float:
return abs(self.stop_loss_at - self.entry_price) * self.qty

@property
def risk_percentage(self) -> float:
return round((self.risk / self.size) * 100, 2)

@property
def risk_reward_ratio(self) -> float:
return self.reward / self.risk

@property
def r(self) -> float:
"""alias for risk_reward_ratio"""
return self.risk_reward_ratio

@property
def pnl(self) -> float:
"""PNL"""
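A quick worked example of the trade metrics added as properties in this hunk (the numbers are illustrative only):

```python
entry_price, take_profit_at, stop_loss_at, qty = 100.0, 130.0, 90.0, 2.0

size = qty * entry_price                           # 200.0
reward = abs(take_profit_at - entry_price) * qty   # 60.0
risk = abs(stop_loss_at - entry_price) * qty       # 20.0
risk_percentage = round((risk / size) * 100, 2)    # 10.0 -> 10% of the position size at risk
r = reward / risk                                  # 3.0  -> a "3R" trade
```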
@@ -137,6 +166,6 @@ class CompletedTrade(peewee.Model):
|
||||
return (self.closed_at - self.opened_at) / 1000
|
||||
|
||||
|
||||
# if database is open, create the table
|
||||
if database.is_open():
|
||||
if not jh.is_unit_testing():
|
||||
# create the table
|
||||
CompletedTrade.create_table()
|
||||
|
||||
@@ -1,9 +1,7 @@
|
||||
import peewee
|
||||
from jesse.services.db import database
|
||||
|
||||
|
||||
if database.is_closed():
|
||||
database.open_connection()
|
||||
import jesse.helpers as jh
|
||||
from jesse.services.db import db
|
||||
|
||||
|
||||
class DailyBalance(peewee.Model):
|
||||
@@ -15,24 +13,22 @@ class DailyBalance(peewee.Model):
|
||||
balance = peewee.FloatField()
|
||||
|
||||
class Meta:
|
||||
from jesse.services.db import database
|
||||
|
||||
database = database.db
|
||||
database = db
|
||||
indexes = (
|
||||
(('identifier', 'exchange', 'timestamp', 'asset'), True),
|
||||
(('identifier', 'exchange'), False),
|
||||
)
|
||||
|
||||
def __init__(self, attributes: dict = None, **kwargs) -> None:
|
||||
def __init__(self, attributes=None, **kwargs) -> None:
|
||||
peewee.Model.__init__(self, attributes=attributes, **kwargs)
|
||||
|
||||
if attributes is None:
|
||||
attributes = {}
|
||||
|
||||
for a, value in attributes.items():
|
||||
setattr(self, a, value)
|
||||
for a in attributes:
|
||||
setattr(self, a, attributes[a])
|
||||
|
||||
|
||||
# if database is open, create the table
|
||||
if database.is_open():
|
||||
if not jh.is_unit_testing():
|
||||
# create the table
|
||||
DailyBalance.create_table()
|
||||
|
||||
@@ -32,29 +32,29 @@ class Exchange(ABC):
|
||||
self.fee_rate = fee_rate
|
||||
|
||||
@abstractmethod
|
||||
def wallet_balance(self, symbol: str = '') -> float:
|
||||
def wallet_balance(self, symbol=''):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def available_margin(self, symbol: str = '') -> float:
|
||||
def available_margin(self, symbol=''):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def on_order_submission(self, order: Order, skip_market_order: bool = True) -> None:
|
||||
def on_order_submission(self, order: Order, skip_market_order=True):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def on_order_execution(self, order: Order) -> None:
|
||||
def on_order_execution(self, order: Order):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def on_order_cancellation(self, order: Order) -> None:
|
||||
def on_order_cancellation(self, order: Order):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def add_realized_pnl(self, realized_pnl: float) -> None:
|
||||
def add_realized_pnl(self, realized_pnl: float):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def charge_fee(self, amount: float) -> None:
|
||||
def charge_fee(self, amount):
|
||||
pass
|
||||
|
||||
@@ -58,10 +58,10 @@ class FuturesExchange(Exchange):
|
||||
|
||||
self.settlement_currency = settlement_currency.upper()
|
||||
|
||||
def wallet_balance(self, symbol: str = '') -> float:
|
||||
def wallet_balance(self, symbol=''):
|
||||
return self.assets[self.settlement_currency]
|
||||
|
||||
def available_margin(self, symbol: str = '') -> float:
|
||||
def available_margin(self, symbol=''):
|
||||
# a temp which gets added to per each asset (remember that all future assets use the same currency for settlement)
|
||||
temp_credits = self.assets[self.settlement_currency]
|
||||
|
||||
@@ -96,7 +96,7 @@ class FuturesExchange(Exchange):
|
||||
# count in the leverage
|
||||
return temp_credits * self.futures_leverage
|
||||
|
||||
def charge_fee(self, amount: float) -> None:
|
||||
def charge_fee(self, amount):
|
||||
fee_amount = abs(amount) * self.fee_rate
|
||||
new_balance = self.assets[self.settlement_currency] - fee_amount
|
||||
logger.info(
|
||||
@@ -104,22 +104,23 @@ class FuturesExchange(Exchange):
|
||||
)
|
||||
self.assets[self.settlement_currency] = new_balance
|
||||
|
||||
def add_realized_pnl(self, realized_pnl: float) -> None:
|
||||
def add_realized_pnl(self, realized_pnl: float):
|
||||
new_balance = self.assets[self.settlement_currency] + realized_pnl
|
||||
logger.info(
|
||||
f'Added realized PNL of {round(realized_pnl, 2)}. Balance for {self.settlement_currency} on {self.name} changed from {round(self.assets[self.settlement_currency], 2)} to {round(new_balance, 2)}')
|
||||
self.assets[self.settlement_currency] = new_balance
|
||||
|
||||
def on_order_submission(self, order: Order, skip_market_order: bool = True) -> None:
|
||||
def on_order_submission(self, order: Order, skip_market_order=True):
|
||||
base_asset = jh.base_asset(order.symbol)
|
||||
|
||||
# make sure we don't spend more than we're allowed considering current allowed leverage
|
||||
if (order.type != order_types.MARKET or skip_market_order) and not order.is_reduce_only:
|
||||
order_size = abs(order.qty * order.price)
|
||||
remaining_margin = self.available_margin()
|
||||
if order_size > remaining_margin:
|
||||
raise InsufficientMargin(
|
||||
f'You cannot submit an order for ${round(order_size)} when your margin balance is ${round(remaining_margin)}')
|
||||
if order.type != order_types.MARKET or skip_market_order:
|
||||
if not order.is_reduce_only:
|
||||
order_size = abs(order.qty * order.price)
|
||||
remaining_margin = self.available_margin()
|
||||
if order_size > remaining_margin:
|
||||
raise InsufficientMargin(
|
||||
f'You cannot submit an order for ${round(order_size)} when your margin balance is ${round(remaining_margin)}')
|
||||
|
||||
# skip market order at the time of submission because we don't have
|
||||
# the exact order.price. Instead, we call on_order_submission() one
|
||||
@@ -135,7 +136,7 @@ class FuturesExchange(Exchange):
|
||||
else:
|
||||
self.sell_orders[base_asset].append(np.array([order.qty, order.price]))
|
||||
|
||||
def on_order_execution(self, order: Order) -> None:
|
||||
def on_order_execution(self, order: Order):
|
||||
base_asset = jh.base_asset(order.symbol)
|
||||
|
||||
if order.type == order_types.MARKET:
|
||||
@@ -155,7 +156,7 @@ class FuturesExchange(Exchange):
|
||||
self.sell_orders[base_asset][index] = np.array([0, 0])
|
||||
break
|
||||
|
||||
def on_order_cancellation(self, order: Order) -> None:
|
||||
def on_order_cancellation(self, order: Order):
|
||||
base_asset = jh.base_asset(order.symbol)
|
||||
|
||||
self.available_assets[base_asset] -= order.qty
|
||||
|
||||
@@ -1,33 +0,0 @@
|
||||
import peewee
|
||||
from jesse.services.db import database
|
||||
|
||||
|
||||
if database.is_closed():
|
||||
database.open_connection()
|
||||
|
||||
|
||||
class Log(peewee.Model):
|
||||
id = peewee.UUIDField(primary_key=True)
|
||||
session_id = peewee.UUIDField(index=True)
|
||||
timestamp = peewee.BigIntegerField()
|
||||
message = peewee.TextField()
|
||||
# 1: info, 2: error, maybe add more in the future?
|
||||
type = peewee.SmallIntegerField()
|
||||
|
||||
class Meta:
|
||||
from jesse.services.db import database
|
||||
database = database.db
|
||||
|
||||
def __init__(self, attributes=None, **kwargs) -> None:
|
||||
peewee.Model.__init__(self, attributes=attributes, **kwargs)
|
||||
|
||||
if attributes is None:
|
||||
attributes = {}
|
||||
|
||||
for a in attributes:
|
||||
setattr(self, a, attributes[a])
|
||||
|
||||
|
||||
# if database is open, create the table
|
||||
if database.is_open():
|
||||
Log.create_table()
|
||||
@@ -1,32 +0,0 @@
|
||||
import peewee
|
||||
from jesse.services.db import database
|
||||
|
||||
|
||||
if database.is_closed():
|
||||
database.open_connection()
|
||||
|
||||
|
||||
class Option(peewee.Model):
|
||||
id = peewee.UUIDField(primary_key=True)
|
||||
updated_at = peewee.BigIntegerField()
|
||||
type = peewee.CharField()
|
||||
json = peewee.TextField()
|
||||
|
||||
class Meta:
|
||||
from jesse.services.db import database
|
||||
|
||||
database = database.db
|
||||
|
||||
def __init__(self, attributes=None, **kwargs) -> None:
|
||||
peewee.Model.__init__(self, attributes=attributes, **kwargs)
|
||||
|
||||
if attributes is None:
|
||||
attributes = {}
|
||||
|
||||
for a in attributes:
|
||||
setattr(self, a, attributes[a])
|
||||
|
||||
|
||||
# if database is open, create the table
|
||||
if database.is_open():
|
||||
Option.create_table()
|
||||
@@ -1,89 +1,69 @@
|
||||
import numpy as np
|
||||
from playhouse.postgres_ext import *
|
||||
|
||||
import jesse.helpers as jh
|
||||
import jesse.services.logger as logger
|
||||
import jesse.services.selectors as selectors
|
||||
from jesse import sync_publish
|
||||
from jesse.config import config
|
||||
from jesse.services.notifier import notify
|
||||
from jesse.enums import order_statuses, order_flags
|
||||
from jesse.services.db import database
|
||||
|
||||
|
||||
if database.is_closed():
|
||||
database.open_connection()
|
||||
from jesse.services.db import db
|
||||
from jesse.services.notifier import notify
|
||||
|
||||
|
||||
class Order(Model):
|
||||
# id generated by Jesse for database usage
|
||||
id = UUIDField(primary_key=True)
|
||||
trade_id = UUIDField(index=True, null=True)
|
||||
session_id = UUIDField(index=True)
|
||||
trade_id = UUIDField(index=True)
|
||||
|
||||
# id generated by market, used in live-trade mode
|
||||
exchange_id = CharField(null=True)
|
||||
exchange_id = CharField()
|
||||
# some exchanges might require even further info
|
||||
vars = JSONField(default={})
|
||||
|
||||
symbol = CharField()
|
||||
exchange = CharField()
|
||||
side = CharField()
|
||||
type = CharField()
|
||||
flag = CharField(null=True)
|
||||
qty = FloatField()
|
||||
price = FloatField(null=True)
|
||||
price = FloatField(default=np.nan)
|
||||
status = CharField(default=order_statuses.ACTIVE)
|
||||
created_at = BigIntegerField()
|
||||
executed_at = BigIntegerField(null=True)
|
||||
canceled_at = BigIntegerField(null=True)
|
||||
role = CharField(null=True)
|
||||
submitted_via = None
|
||||
|
||||
class Meta:
|
||||
from jesse.services.db import database
|
||||
|
||||
database = database.db
|
||||
database = db
|
||||
indexes = ((('exchange', 'symbol'), False),)
|
||||
|
||||
def __init__(self, attributes: dict = None, **kwargs) -> None:
|
||||
def __init__(self, attributes=None, **kwargs) -> None:
|
||||
Model.__init__(self, attributes=attributes, **kwargs)
|
||||
|
||||
if attributes is None:
|
||||
attributes = {}
|
||||
|
||||
for a, value in attributes.items():
|
||||
setattr(self, a, value)
|
||||
for a in attributes:
|
||||
setattr(self, a, attributes[a])
|
||||
|
||||
if self.created_at is None:
|
||||
self.created_at = jh.now_to_timestamp()
|
||||
|
||||
if jh.is_live():
|
||||
from jesse.store import store
|
||||
self.session_id = store.app.session_id
|
||||
self.save(force_insert=True)
|
||||
|
||||
if jh.is_live():
|
||||
if jh.is_live() and config['env']['notifications']['events']['submitted_orders']:
|
||||
self.notify_submission()
|
||||
if jh.is_debuggable('order_submission'):
|
||||
txt = f'{"QUEUED" if self.is_queued else "SUBMITTED"} order: {self.symbol}, {self.type}, {self.side}, {self.qty}'
|
||||
if self.price:
|
||||
txt += f', ${round(self.price, 2)}'
|
||||
logger.info(txt)
|
||||
logger.info(
|
||||
f'{"QUEUED" if self.is_queued else "SUBMITTED"} order: {self.symbol}, {self.type}, {self.side}, {self.qty}, ${round(self.price, 2)}'
|
||||
)
|
||||
|
||||
# handle exchange balance for ordered asset
|
||||
e = selectors.get_exchange(self.exchange)
|
||||
e.on_order_submission(self)
|
||||
|
||||
def broadcast(self) -> None:
|
||||
sync_publish('order', self.to_dict)
|
||||
|
||||
def notify_submission(self) -> None:
|
||||
self.broadcast()
|
||||
|
||||
if config['env']['notifications']['events']['submitted_orders']:
|
||||
txt = f'{"QUEUED" if self.is_queued else "SUBMITTED"} order: {self.symbol}, {self.type}, {self.side}, {self.qty}'
|
||||
if self.price:
|
||||
txt += f', ${round(self.price, 2)}'
|
||||
notify(txt)
|
||||
notify(
|
||||
f'{"QUEUED" if self.is_queued else "SUBMITTED"} order: {self.symbol}, {self.type}, {self.side}, { self.qty}, ${round(self.price, 2)}'
|
||||
)
|
||||
|
||||
@property
|
||||
def is_canceled(self) -> bool:
|
||||
@@ -124,71 +104,43 @@ class Order(Model):
|
||||
def is_close(self) -> bool:
|
||||
return self.flag == order_flags.CLOSE
|
||||
|
||||
@property
|
||||
def to_dict(self):
|
||||
return {
|
||||
'id': self.id,
|
||||
'session_id': self.session_id,
|
||||
'exchange_id': self.exchange_id,
|
||||
'symbol': self.symbol,
|
||||
'side': self.side,
|
||||
'type': self.type,
|
||||
'qty': self.qty,
|
||||
'price': self.price,
|
||||
'flag': self.flag,
|
||||
'status': self.status,
|
||||
'created_at': self.created_at,
|
||||
'canceled_at': self.canceled_at,
|
||||
'executed_at': self.executed_at,
|
||||
}
|
||||
|
||||
def cancel(self, silent=False) -> None:
|
||||
def cancel(self) -> None:
|
||||
if self.is_canceled or self.is_executed:
|
||||
return
|
||||
|
||||
self.canceled_at = jh.now_to_timestamp()
|
||||
self.status = order_statuses.CANCELED
|
||||
|
||||
if jh.is_live():
|
||||
self.save()
|
||||
|
||||
if not silent:
|
||||
txt = f'CANCELED order: {self.symbol}, {self.type}, {self.side}, {self.qty}'
|
||||
if self.price:
|
||||
txt += f', ${round(self.price, 2)}'
|
||||
if jh.is_debuggable('order_cancellation'):
|
||||
logger.info(txt)
|
||||
if jh.is_live():
|
||||
self.broadcast()
|
||||
if config['env']['notifications']['events']['cancelled_orders']:
|
||||
notify(txt)
|
||||
if jh.is_debuggable('order_cancellation'):
|
||||
logger.info(
|
||||
f'CANCELED order: {self.symbol}, {self.type}, {self.side}, { self.qty}, ${round(self.price, 2)}'
|
||||
)
|
||||
if jh.is_live() and config['env']['notifications']['events']['cancelled_orders']:
|
||||
notify(
|
||||
f'CANCELED order: {self.symbol}, {self.type}, {self.side}, {self.qty}, {round(self.price, 2)}'
|
||||
)
|
||||
|
||||
# handle exchange balance
|
||||
e = selectors.get_exchange(self.exchange)
|
||||
e.on_order_cancellation(self)
|
||||
|
||||
def execute(self, silent=False) -> None:
|
||||
def execute(self) -> None:
|
||||
if self.is_canceled or self.is_executed:
|
||||
return
|
||||
|
||||
self.executed_at = jh.now_to_timestamp()
|
||||
self.status = order_statuses.EXECUTED
|
||||
|
||||
if jh.is_live():
|
||||
self.save()
|
||||
|
||||
if not silent:
|
||||
txt = f'EXECUTED order: {self.symbol}, {self.type}, {self.side}, {self.qty}'
|
||||
if self.price:
|
||||
txt += f', ${round(self.price, 2)}'
|
||||
# log
|
||||
if jh.is_debuggable('order_execution'):
|
||||
logger.info(txt)
|
||||
# notify
|
||||
if jh.is_live():
|
||||
self.broadcast()
|
||||
if config['env']['notifications']['events']['executed_orders']:
|
||||
notify(txt)
|
||||
# log
|
||||
if jh.is_debuggable('order_execution'):
|
||||
logger.info(
|
||||
f'EXECUTED order: {self.symbol}, {self.type}, {self.side}, {self.qty}, ${round(self.price, 2)}'
|
||||
)
|
||||
# notify
|
||||
if jh.is_live() and config['env']['notifications']['events']['executed_orders']:
|
||||
notify(
|
||||
f'EXECUTED order: {self.symbol}, {self.type}, {self.side}, {self.qty}, {round(self.price, 2)}'
|
||||
)
|
||||
|
||||
p = selectors.get_position(self.exchange, self.symbol)
|
||||
|
||||
@@ -200,6 +152,6 @@ class Order(Model):
|
||||
e.on_order_execution(self)
|
||||
|
||||
|
||||
# if database is open, create the table
|
||||
if database.is_open():
|
||||
if not jh.is_unit_testing():
|
||||
# create the table
|
||||
Order.create_table()
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
import peewee
|
||||
|
||||
import jesse.helpers as jh
|
||||
from jesse.services.db import db
|
||||
|
||||
|
||||
class Orderbook(peewee.Model):
|
||||
id = peewee.UUIDField(primary_key=True)
|
||||
@@ -11,16 +14,19 @@ class Orderbook(peewee.Model):
|
||||
data = peewee.BlobField()
|
||||
|
||||
class Meta:
|
||||
from jesse.services.db import database
|
||||
|
||||
database = database.db
|
||||
database = db
|
||||
indexes = ((('timestamp', 'exchange', 'symbol'), True),)
|
||||
|
||||
def __init__(self, attributes: dict = None, **kwargs) -> None:
|
||||
def __init__(self, attributes=None, **kwargs) -> None:
|
||||
peewee.Model.__init__(self, attributes=attributes, **kwargs)
|
||||
|
||||
if attributes is None:
|
||||
attributes = {}
|
||||
|
||||
for a, value in attributes.items():
|
||||
setattr(self, a, value)
|
||||
for a in attributes:
|
||||
setattr(self, a, attributes[a])
|
||||
|
||||
|
||||
if not jh.is_unit_testing():
|
||||
# create the table
|
||||
Orderbook.create_table()
|
||||
|
||||
@@ -1,5 +1,3 @@
from typing import Union

import numpy as np

import jesse.helpers as jh
@@ -13,7 +11,7 @@ from jesse.utils import sum_floats, subtract_floats


class Position:
def __init__(self, exchange_name: str, symbol: str, attributes: dict = None) -> None:
def __init__(self, exchange_name: str, symbol: str, attributes=None) -> None:
self.id = jh.generate_unique_id()
self.entry_price = None
self.exit_price = None
@@ -39,21 +37,21 @@ class Position:
setattr(self, a, attributes[a])

@property
def mark_price(self) -> float:
def mark_price(self):
if not jh.is_live():
return self.current_price

return self._mark_price

@property
def funding_rate(self) -> float:
def funding_rate(self):
if not jh.is_live():
return 0

return self._funding_rate

@property
def next_funding_timestamp(self) -> Union[int, None]:
def next_funding_timestamp(self):
if not jh.is_live():
return None

@@ -114,7 +112,7 @@ class Position:
return base_cost

@property
def leverage(self) -> Union[int, np.float64]:
def leverage(self):
if self.exchange.type == 'spot':
return 1

@@ -170,7 +168,7 @@ class Position:
return self.exchange.futures_leverage_mode

@property
def liquidation_price(self) -> Union[float, np.float64]:
def liquidation_price(self):
"""
The price at which the position gets liquidated. formulas are taken from:
https://help.bybit.com/hc/en-us/articles/900000181046-Liquidation-Price-USDT-Contract-
@@ -181,10 +179,7 @@ class Position:
if jh.is_livetrading():
return self._liquidation_price

if self.mode in ['cross', 'spot']:
return np.nan

elif self.mode == 'isolated':
if self.mode == 'isolated':
if self.type == 'long':
return self.entry_price * (1 - self._initial_margin_rate + 0.004)
elif self.type == 'short':
@@ -192,15 +187,21 @@ class Position:
else:
return np.nan

elif self.mode == 'cross':
return np.nan

elif self.mode == 'spot':
return np.nan

else:
raise ValueError

@property
def _initial_margin_rate(self) -> float:
def _initial_margin_rate(self):
return 1 / self.leverage

@property
def bankruptcy_price(self) -> Union[float, np.float64]:
def bankruptcy_price(self):
if self.type == 'long':
return self.entry_price * (1 - self._initial_margin_rate)
elif self.type == 'short':
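For the isolated-mode formulas shown in the hunks above: the initial margin rate is simply `1 / leverage`, and the `0.004` term is the maintenance-margin adjustment used for long positions. The sketch below illustrates those formulas in isolation; the short-side variants are assumed by symmetry since the hunk is truncated before them, and this is not a copy of the actual `Position` class.

```python
def initial_margin_rate(leverage: float) -> float:
    return 1 / leverage


def liquidation_price(entry_price: float, leverage: float, position_type: str) -> float:
    imr = initial_margin_rate(leverage)
    if position_type == 'long':
        return entry_price * (1 - imr + 0.004)
    elif position_type == 'short':
        # assumed mirror of the long formula (not visible in the truncated hunk)
        return entry_price * (1 + imr - 0.004)
    raise ValueError(position_type)


def bankruptcy_price(entry_price: float, leverage: float, position_type: str) -> float:
    imr = initial_margin_rate(leverage)
    if position_type == 'long':
        return entry_price * (1 - imr)
    elif position_type == 'short':
        # assumed mirror of the long formula
        return entry_price * (1 + imr)
    raise ValueError(position_type)


# e.g. a 10x long opened at $20,000 would liquidate around $18,080
print(round(liquidation_price(20_000, 10, 'long'), 2))
```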
@@ -271,7 +272,7 @@ class Position:
raise OpenPositionError('position must be already open in order to increase its size')

qty = abs(qty)
# size = qty * price
size = qty * price

# if self.exchange:
# self.exchange.decrease_futures_balance(size)
@@ -301,7 +302,7 @@ class Position:
self.qty = qty
self.opened_at = jh.now_to_timestamp()

info_text = f'OPENED {self.type} position: {self.exchange_name}, {self.symbol}, {self.qty}, ${round(self.entry_price, 2)}'
info_text = f'OPENED {self.type} position: {self.exchange_name}, {self.symbol}, { self.qty}, ${round(self.entry_price, 2)}'

if jh.is_debuggable('position_opened'):
logger.info(info_text)

@@ -1,10 +1,6 @@
class Route:
def __init__(self, exchange: str, symbol: str, timeframe: str = None, strategy_name: str = None,
dna: str = None) -> None:
# replace PERP with USD in FTX routes
if exchange.startswith('FTX') and symbol.upper().endswith('PERP'):
symbol = symbol.replace('PERP', 'USD')

self.exchange = exchange
self.symbol = symbol
self.timeframe = timeframe

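A small sketch of the route-level symbol normalization added above: FTX perpetual symbols such as "BTC-PERP" are stored internally as "BTC-USD". The helper name below is hypothetical and only illustrates the idea.

```python
def normalize_symbol(exchange: str, symbol: str) -> str:
    # mirror the check in Route.__init__: only FTX perpetuals are rewritten
    if exchange.startswith('FTX') and symbol.upper().endswith('PERP'):
        return symbol.upper().replace('PERP', 'USD')
    return symbol.upper()


assert normalize_symbol('FTX Futures', 'BTC-PERP') == 'BTC-USD'
assert normalize_symbol('Binance Futures', 'BTC-USDT') == 'BTC-USDT'
```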
@@ -7,10 +7,10 @@ from .Exchange import Exchange


class SpotExchange(Exchange):
def add_realized_pnl(self, realized_pnl: float) -> None:
def add_realized_pnl(self, realized_pnl: float):
pass

def charge_fee(self, amount: float) -> None:
def charge_fee(self, amount):
pass

# current holding assets
@@ -24,18 +24,19 @@ class SpotExchange(Exchange):
from jesse.routes import router
# check if base assets are configured
for route in router.routes:
base_asset = jh.base_asset(route.symbol)
if base_asset not in self.available_assets:
raise InvalidConfig(
f"Jesse needs to know the balance of your base asset for spot mode. Please add {base_asset} to your exchanges assets config.")
base_asset = jh.base_asset(route.symbol)
if base_asset not in self.available_assets:
raise InvalidConfig(
f"Jesse needs to know the balance of your base asset for spot mode. Please add {base_asset} to your exchanges assets config.")

def wallet_balance(self, symbol: str = '') -> float:

def wallet_balance(self, symbol=''):
if symbol == '':
raise ValueError
quote_asset = jh.quote_asset(symbol)
return self.assets[quote_asset]

def available_margin(self, symbol: str = '') -> float:
def available_margin(self, symbol=''):
return self.wallet_balance(symbol)

def on_order_submission(self, order: Order, skip_market_order=True):
@@ -81,7 +82,7 @@ class SpotExchange(Exchange):
f'Available balance for {base_asset} on {self.name} changed from {round(temp_old_base_available_asset, 2)} to {round(temp_new_base_available_asset, 2)}'
)

def on_order_execution(self, order: Order) -> None:
def on_order_execution(self, order: Order):
base_asset = jh.base_asset(order.symbol)
quote_asset = jh.quote_asset(order.symbol)

@@ -108,7 +109,7 @@ class SpotExchange(Exchange):
temp_new_quote_asset = self.assets[quote_asset]
if jh.is_debuggable('balance_update') and temp_old_quote_asset != temp_new_quote_asset:
logger.info(
f'Balance for {quote_asset} on {self.name} changed from {round(temp_old_quote_asset, 2)} to {round(temp_new_quote_asset, 2)}'
f'Balance for {quote_asset} on {self.name} changed from {round(temp_old_quote_asset, 2)} to { round(temp_new_quote_asset, 2)}'
)
temp_new_quote_available_asset = self.available_assets[quote_asset]
if jh.is_debuggable('balance_update') and temp_old_quote_available_asset != temp_new_quote_available_asset:
@@ -127,7 +128,7 @@ class SpotExchange(Exchange):
f'Balance for {base_asset} on {self.name} changed from {round(temp_old_base_available_asset, 2)} to {round(temp_new_base_available_asset, 2)}'
)

def on_order_cancellation(self, order: Order) -> None:
def on_order_cancellation(self, order: Order):
base_asset = jh.base_asset(order.symbol)
quote_asset = jh.quote_asset(order.symbol)


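The SpotExchange hunks above split every symbol into a base and a quote asset and adjust both balances when an order executes. The stand-alone sketch below captures that accounting idea only; it ignores fees and the available/locked balance distinction that the real `on_order_execution` handles.

```python
def split_symbol(symbol: str) -> tuple:
    base, quote = symbol.split('-')
    return base, quote


def apply_execution(assets: dict, symbol: str, qty: float, price: float) -> None:
    base, quote = split_symbol(symbol)
    if qty > 0:   # buy: spend quote asset, receive base asset
        assets[quote] -= qty * price
        assets[base] = assets.get(base, 0.0) + qty
    else:         # sell: qty is negative, so this reduces the base asset
        assets[base] += qty
        assets[quote] += abs(qty) * price


assets = {'USDT': 10_000.0, 'BTC': 0.0}
apply_execution(assets, 'BTC-USDT', 0.2, 20_000)
print(assets)  # {'USDT': 6000.0, 'BTC': 0.2}
```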
@@ -1,5 +1,8 @@
import peewee

import jesse.helpers as jh
from jesse.services.db import db


class Ticker(peewee.Model):
id = peewee.UUIDField(primary_key=True)
@@ -17,17 +20,19 @@ class Ticker(peewee.Model):
exchange = peewee.CharField()

class Meta:
from jesse.services.db import database

database = database.db
database = db
indexes = ((('timestamp', 'exchange', 'symbol'), True),)

def __init__(self, attributes: dict = None, **kwargs) -> None:
def __init__(self, attributes=None, **kwargs) -> None:
peewee.Model.__init__(self, attributes=attributes, **kwargs)

if attributes is None:
attributes = {}

for a, value in attributes.items():
setattr(self, a, value)
for a in attributes:
setattr(self, a, attributes[a])


if not jh.is_unit_testing():
# create the table
Ticker.create_table()

@@ -1,5 +1,8 @@
import peewee

import jesse.helpers as jh
from jesse.services.db import db


class Trade(peewee.Model):
id = peewee.UUIDField(primary_key=True)
@@ -18,16 +21,19 @@ class Trade(peewee.Model):
exchange = peewee.CharField()

class Meta:
from jesse.services.db import database

database = database.db
database = db
indexes = ((('timestamp', 'exchange', 'symbol'), True),)

def __init__(self, attributes: dict = None, **kwargs) -> None:
def __init__(self, attributes=None, **kwargs) -> None:
peewee.Model.__init__(self, attributes=attributes, **kwargs)

if attributes is None:
attributes = {}

for a, value in attributes.items():
setattr(self, a, value)
for a in attributes:
setattr(self, a, attributes[a])


if not jh.is_unit_testing():
# create the table
Trade.create_table()

@@ -7,6 +7,4 @@ from .Position import Position
from .Route import Route
from .SpotExchange import SpotExchange
from .Ticker import Ticker
from .Log import Log
# from .DailyBalance import DailyBalance
from .utils import store_candle_into_db, store_ticker_into_db, store_trade_into_db, store_orderbook_into_db

@@ -1,13 +1,19 @@
import threading

import numpy as np

import jesse.helpers as jh
from jesse.models.Candle import Candle
from jesse.models.CompletedTrade import CompletedTrade
from jesse.models.DailyBalance import DailyBalance
from jesse.models.Order import Order
from jesse.models.Orderbook import Orderbook
from jesse.models.Ticker import Ticker
from jesse.models.Trade import Trade
from jesse.services import logger


def store_candle_into_db(exchange: str, symbol: str, candle: np.ndarray, on_conflict='ignore') -> None:
from jesse.models.Candle import Candle

def store_candle_into_db(exchange: str, symbol: str, candle: np.ndarray) -> None:
d = {
'id': jh.generate_unique_id(),
'symbol': symbol,
@@ -21,43 +27,13 @@ def store_candle_into_db(exchange: str, symbol: str, candle: np.ndarray, on_conf
}

def async_save() -> None:
if on_conflict == 'ignore':
Candle.insert(**d).on_conflict_ignore().execute()
elif on_conflict == 'replace':
Candle.insert(**d).on_conflict(
conflict_target=['timestamp', 'symbol', 'exchange'],
preserve=(Candle.open, Candle.high, Candle.low, Candle.close, Candle.volume),
).execute()
elif on_conflict == 'error':
Candle.insert(**d).execute()
else:
raise Exception(f'Unknown on_conflict value: {on_conflict}')

# async call
threading.Thread(target=async_save).start()

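The hunk above removes the `on_conflict` dispatch from `store_candle_into_db`. For reference, a minimal sketch of that dispatch is shown below against an in-memory SQLite table; Jesse's real Candle model targets PostgreSQL, where the 'replace' branch uses `conflict_target`/`preserve` as in the removed lines rather than SQLite's REPLACE behavior.

```python
import peewee

db = peewee.SqliteDatabase(':memory:')


class Candle(peewee.Model):
    timestamp = peewee.BigIntegerField()
    symbol = peewee.CharField()
    exchange = peewee.CharField()
    close = peewee.FloatField()

    class Meta:
        database = db
        indexes = ((('timestamp', 'exchange', 'symbol'), True),)


db.connect()
db.create_tables([Candle])


def store_candle(d: dict, on_conflict: str = 'ignore') -> None:
    if on_conflict == 'ignore':
        Candle.insert(**d).on_conflict_ignore().execute()
    elif on_conflict == 'replace':
        Candle.insert(**d).on_conflict_replace().execute()
    elif on_conflict == 'error':
        Candle.insert(**d).execute()   # duplicate key raises IntegrityError
    else:
        raise ValueError(f'Unknown on_conflict value: {on_conflict}')


c = {'timestamp': 1609459200000, 'symbol': 'BTC-USDT', 'exchange': 'Sandbox', 'close': 29000.0}
store_candle(c)                                   # inserted
store_candle({**c, 'close': 29100.0})             # duplicate key -> silently ignored
store_candle({**c, 'close': 29100.0}, 'replace')  # duplicate key -> row replaced
```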
def store_log_into_db(log: dict, log_type: str) -> None:
from jesse.store import store
from jesse.models.Log import Log

if log_type == 'info':
log_type = 1
elif log_type == 'error':
log_type = 2
else:
raise ValueError(f"Unsupported log_type value: {log_type}")

d = {
'id': log['id'],
'timestamp': log['timestamp'],
'message': log['message'],
'session_id': store.app.session_id,
'type': log_type
}

def async_save() -> None:
Log.insert(**d).execute()
Candle.insert(**d).on_conflict_ignore().execute()
print(
jh.color(
f"candle: {jh.timestamp_to_time(d['timestamp'])}-{exchange}-{symbol}: {candle}",
'blue'
)
)

# async call
threading.Thread(target=async_save).start()
@@ -65,8 +41,6 @@ def store_log_into_db(log: dict, log_type: str) -> None:

def store_ticker_into_db(exchange: str, symbol: str, ticker: np.ndarray) -> None:
return
from jesse.models.Ticker import Ticker

d = {
'id': jh.generate_unique_id(),
'timestamp': ticker[0],
@@ -88,10 +62,8 @@ def store_ticker_into_db(exchange: str, symbol: str, ticker: np.ndarray) -> None
threading.Thread(target=async_save).start()


def store_completed_trade_into_db(completed_trade) -> None:
def store_completed_trade_into_db(completed_trade: CompletedTrade) -> None:
return
from jesse.models.CompletedTrade import CompletedTrade

d = {
'id': completed_trade.id,
'strategy_name': completed_trade.strategy_name,
@@ -101,6 +73,8 @@ def store_completed_trade_into_db(completed_trade) -> None:
'timeframe': completed_trade.timeframe,
'entry_price': completed_trade.entry_price,
'exit_price': completed_trade.exit_price,
'take_profit_at': completed_trade.take_profit_at,
'stop_loss_at': completed_trade.stop_loss_at,
'qty': completed_trade.qty,
'opened_at': completed_trade.opened_at,
'closed_at': completed_trade.closed_at,
@@ -112,17 +86,14 @@ def store_completed_trade_into_db(completed_trade) -> None:
def async_save() -> None:
CompletedTrade.insert(**d).execute()
if jh.is_debugging():
logger.info(
f'Stored the completed trade record for {completed_trade.exchange}-{completed_trade.symbol}-{completed_trade.strategy_name} into database.')
logger.info(f'Stored the completed trade record for {completed_trade.exchange}-{completed_trade.symbol}-{completed_trade.strategy_name} into database.')

# async call
threading.Thread(target=async_save).start()


def store_order_into_db(order) -> None:
def store_order_into_db(order: Order) -> None:
return
from jesse.models.Order import Order

d = {
'id': order.id,
'trade_id': order.trade_id,
@@ -153,14 +124,11 @@ def store_order_into_db(order) -> None:

def store_daily_balance_into_db(daily_balance: dict) -> None:
return
from jesse.models.DailyBalance import DailyBalance

def async_save():
DailyBalance.insert(**daily_balance).execute()
if jh.is_debugging():
logger.info(
f'Stored daily portfolio balance record into the database: {daily_balance["asset"]} => {jh.format_currency(round(daily_balance["balance"], 2))}'
)
logger.info(f'Stored daily portfolio balance record into the database: {daily_balance["asset"]} => {jh.format_currency(round(daily_balance["balance"], 2))}'
)

# async call
threading.Thread(target=async_save).start()
@@ -168,8 +136,6 @@ def store_daily_balance_into_db(daily_balance: dict) -> None:

def store_trade_into_db(exchange: str, symbol: str, trade: np.ndarray) -> None:
return
from jesse.models.Trade import Trade

d = {
'id': jh.generate_unique_id(),
'timestamp': trade[0],
@@ -197,8 +163,6 @@ def store_trade_into_db(exchange: str, symbol: str, trade: np.ndarray) -> None:

def store_orderbook_into_db(exchange: str, symbol: str, orderbook: np.ndarray) -> None:
return
from jesse.models.Orderbook import Orderbook

d = {
'id': jh.generate_unique_id(),
'timestamp': jh.now_to_timestamp(),
@@ -221,9 +185,7 @@ def store_orderbook_into_db(exchange: str, symbol: str, orderbook: np.ndarray) -


def fetch_candles_from_db(exchange: str, symbol: str, start_date: int, finish_date: int) -> tuple:
from jesse.models.Candle import Candle

return tuple(
candles_tuple = tuple(
Candle.select(
Candle.timestamp, Candle.open, Candle.close, Candle.high, Candle.low,
Candle.volume
@@ -233,3 +195,5 @@ def fetch_candles_from_db(exchange: str, symbol: str, start_date: int, finish_da
Candle.symbol == symbol
).order_by(Candle.timestamp.asc()).tuples()
)

return candles_tuple

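As a usage note for `fetch_candles_from_db` above: the query returns plain tuples in `(timestamp, open, close, high, low, volume)` order, which callers typically wrap in a numpy array and slice by column. The sketch below uses dummy rows instead of a live database connection.

```python
import numpy as np

rows = (
    (1609459200000, 29000.0, 29050.0, 29080.0, 28990.0, 12.3),
    (1609459260000, 29050.0, 29020.0, 29060.0, 29000.0, 8.7),
)
candles = np.array(rows)

timestamps = candles[:, 0]
closes = candles[:, 2]
print(closes.mean())  # simple sanity check on the loaded columns
```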
@@ -1,5 +1,5 @@
import time
from typing import Dict, Union, List
from typing import Dict, Union

import arrow
import click
@@ -10,9 +10,10 @@ import jesse.helpers as jh
import jesse.services.metrics as stats
import jesse.services.required_candles as required_candles
import jesse.services.selectors as selectors
import jesse.services.table as table
from jesse import exceptions
from jesse.config import config
from jesse.enums import timeframes, order_types, order_roles, order_flags
from jesse.enums import timeframes, order_types, sides, order_roles, order_flags
from jesse.models import Candle, Order, Position
from jesse.modes.utils import save_daily_portfolio_balance
from jesse.routes import router
@@ -25,53 +26,11 @@ from jesse.services.file import store_logs
from jesse.services.validators import validate_routes
from jesse.store import store
from jesse.services import logger
from jesse.services.failure import register_custom_exception_handler
from jesse.services.redis import sync_publish, process_status
from timeloop import Timeloop
from datetime import timedelta
from jesse.services.progressbar import Progressbar


def run(
debug_mode,
user_config: dict,
routes: List[Dict[str, str]],
extra_routes: List[Dict[str, str]],
start_date: str,
finish_date: str,
candles: dict = None,
chart: bool = False,
tradingview: bool = False,
full_reports: bool = False,
csv: bool = False,
json: bool = False
) -> None:
if not jh.is_unit_testing():
# at every second, we check to see if it's time to execute stuff
status_checker = Timeloop()
@status_checker.job(interval=timedelta(seconds=1))
def handle_time():
if process_status() != 'started':
raise exceptions.Termination
status_checker.start()

from jesse.config import config, set_config
config['app']['trading_mode'] = 'backtest'

# debug flag
config['app']['debug_mode'] = debug_mode

# inject config
if not jh.is_unit_testing():
set_config(user_config)

# set routes
router.initiate(routes, extra_routes)

store.app.set_session_id()

register_custom_exception_handler()

def run(start_date: str, finish_date: str, candles: Dict[str, Dict[str, Union[str, np.ndarray]]] = None,
chart: bool = False, tradingview: bool = False, full_reports: bool = False,
csv: bool = False, json: bool = False) -> None:
# clear the screen
if not jh.should_execute_silently():
click.clear()
@@ -84,42 +43,77 @@ def run(

# load historical candles
if candles is None:
print('loading candles...')
candles = load_candles(start_date, finish_date)
click.clear()

if not jh.should_execute_silently():
sync_publish('general_info', {
'session_id': jh.get_session_id(),
'debug_mode': str(config['app']['debug_mode']),
})

# candles info
# print candles table
key = f"{config['app']['considering_candles'][0][0]}-{config['app']['considering_candles'][0][1]}"
sync_publish('candles_info', stats.candles_info(candles[key]['candles']))
table.key_value(stats.candles(candles[key]['candles']), 'candles', alignments=('left', 'right'))
print('\n')

# routes info
sync_publish('routes_info', stats.routes(router.routes))
# print routes table
table.multi_value(stats.routes(router.routes))
print('\n')

# print guidance for debugging candles
if jh.is_debuggable('trading_candles') or jh.is_debuggable('shorter_period_candles'):
print(' Symbol | timestamp | open | close | high | low | volume')

# run backtest simulation
simulator(candles, run_silently=jh.should_execute_silently())
simulator(candles)

if not jh.should_execute_silently():
# print trades metrics
if store.completed_trades.count > 0:
sync_publish('metrics', report.portfolio_metrics())

change = []
# calcualte market change
for e in router.routes:
if e.strategy is None:
return

first = Candle.select(
Candle.close
).where(
Candle.timestamp == jh.date_to_timestamp(start_date),
Candle.exchange == e.exchange,
Candle.symbol == e.symbol
).first()
last = Candle.select(
Candle.close
).where(
Candle.timestamp == jh.date_to_timestamp(finish_date) - 60000,
Candle.exchange == e.exchange,
Candle.symbol == e.symbol
).first()

change.append(((last.close - first.close) / first.close) * 100.0)

data = report.portfolio_metrics()
data.append(['Market Change', f"{str(round(np.average(change), 2))}%"])
print('\n')
table.key_value(data, 'Metrics', alignments=('left', 'right'))
print('\n')

# save logs
more = ""
routes_count = len(router.routes)
more = f"-and-{routes_count - 1}-more" if routes_count > 1 else ""
if routes_count > 1:
more = f"-and-{routes_count-1}-more"

study_name = f"{router.routes[0].strategy_name}-{router.routes[0].exchange}-{router.routes[0].symbol}-{router.routes[0].timeframe}{more}-{start_date}-{finish_date}"
store_logs(study_name, json, tradingview, csv)

if chart:
charts.portfolio_vs_asset_returns(study_name)

sync_publish('equity_curve', charts.equity_curve())

# QuantStats' report
if full_reports:

price_data = []

# load close candles for Buy and hold and calculate pct_change
for index, c in enumerate(config['app']['considering_candles']):
exchange, symbol = c[0], c[1]
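The "Market Change" metric computed in the hunk above is the percentage change of the close price between the first and the last 1-minute candle of the tested period, averaged over all routes. A small stand-alone sketch of that arithmetic, with made-up closes:

```python
import numpy as np


def market_change(first_close: float, last_close: float) -> float:
    return (last_close - first_close) / first_close * 100.0


per_route = [market_change(29_000, 33_350), market_change(730, 810)]
print(round(float(np.average(per_route)), 2))  # -> 12.98
```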
@@ -146,12 +140,7 @@ def run(
bh_daily_returns_all_routes = price_pct_change.mean(1)
quantstats.quantstats_tearsheet(bh_daily_returns_all_routes, study_name)
else:
sync_publish('equity_curve', None)
sync_publish('metrics', None)

# close database connection
from jesse.services.db import database
database.close_connection()
print(jh.color('No trades were made.', 'yellow'))


def load_candles(start_date_str: str, finish_date_str: str) -> Dict[str, Dict[str, Union[str, np.ndarray]]]:
@@ -164,8 +153,7 @@ def load_candles(start_date_str: str, finish_date_str: str) -> Dict[str, Dict[st
if start_date > finish_date:
raise ValueError('start_date cannot be bigger than finish_date.')
if finish_date > arrow.utcnow().int_timestamp * 1000:
raise ValueError(
"Can't load candle data from the future! The finish-date can be up to yesterday's date at most.")
raise ValueError("Can't load candle data from the future!")

# load and add required warm-up candles for backtest
if jh.is_backtesting():
@@ -185,10 +173,13 @@ def load_candles(start_date_str: str, finish_date_str: str) -> Dict[str, Dict[st

cache_key = f"{start_date_str}-{finish_date_str}-{key}"
cached_value = cache.get_value(cache_key)
# if cache exists use cache_value
# if cache exists
if cached_value:
candles_tuple = cached_value
# not cached, get and cache for later calls in the next 5 minutes
# fetch from database
candles_tuple = cached_value or Candle.select(
else:
# fetch from database
candles_tuple = Candle.select(
Candle.timestamp, Candle.open, Candle.close, Candle.high, Candle.low,
Candle.volume
).where(
@@ -196,12 +187,12 @@ def load_candles(start_date_str: str, finish_date_str: str) -> Dict[str, Dict[st
Candle.exchange == exchange,
Candle.symbol == symbol
).order_by(Candle.timestamp.asc()).tuples()

# validate that there are enough candles for selected period
required_candles_count = (finish_date - start_date) / 60_000
if len(candles_tuple) == 0 or candles_tuple[-1][0] != finish_date or candles_tuple[0][0] != start_date:
raise exceptions.CandleNotFoundInDatabase(
f'Not enough candles for {symbol}. You need to import candles.'
)
f'Not enough candles for {symbol}. Try running "jesse import-candles"')
elif len(candles_tuple) != required_candles_count + 1:
raise exceptions.CandleNotFoundInDatabase(
f'There are missing candles between {start_date_str} => {finish_date_str}')
@@ -218,9 +209,7 @@ def load_candles(start_date_str: str, finish_date_str: str) -> Dict[str, Dict[st
return candles

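A simplified sketch of the caching logic in `load_candles` above: the `(start, finish, exchange-symbol)` key is looked up in a cache first and only falls back to the database query on a miss. The cache object below is a plain dict with a hand-rolled TTL, standing in for Jesse's cache service (the removed comment mentions keeping entries "for the next 5 minutes").

```python
import time

_cache: dict = {}
TTL = 60 * 5  # seconds


def get_candles_cached(cache_key: str, fetch_from_db) -> tuple:
    hit = _cache.get(cache_key)
    if hit is not None and time.time() - hit['stored_at'] < TTL:
        return hit['value']
    value = fetch_from_db()  # callable wrapping the peewee query
    _cache[cache_key] = {'value': value, 'stored_at': time.time()}
    return value


# usage sketch:
# candles_tuple = get_candles_cached('2021-01-01-2021-02-01-Binance-BTC-USDT',
#                                    lambda: fetch_candles_from_db(...))
```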
def simulator(
candles: dict, run_silently: bool, hyperparameters: dict = None
) -> None:
def simulator(candles: Dict[str, Dict[str, Union[str, np.ndarray]]], hyperparameters=None) -> None:
begin_time_track = time.time()
key = f"{config['app']['considering_candles'][0][0]}-{config['app']['considering_candles'][0][1]}"
first_candles_set = candles[key]['candles']
@@ -248,10 +237,10 @@ def simulator(
r.strategy.symbol = r.symbol
r.strategy.timeframe = r.timeframe

# read the dna from strategy's dna() and use it for injecting inject hyperparameters
# first convert DNS string into hyperparameters
if len(r.strategy.dna()) > 0 and hyperparameters is None:
hyperparameters = jh.dna_to_hp(r.strategy.hyperparameters(), r.strategy.dna())
# inject hyper parameters (used for optimize_mode)
# convert DNS string into hyperparameters
if r.dna and hyperparameters is None:
hyperparameters = jh.dna_to_hp(r.strategy.hyperparameters(), r.dna)

# inject hyperparameters sent within the optimize mode
if hyperparameters is not None:
@@ -266,79 +255,75 @@ def simulator(
# add initial balance
save_daily_portfolio_balance()

progressbar = Progressbar(length, step=60)
for i in range(length):
# update time
store.app.time = first_candles_set[i][0] + 60_000
with click.progressbar(length=length, label='Executing simulation...') as progressbar:
for i in range(length):
# update time
store.app.time = first_candles_set[i][0] + 60_000

# add candles
for j in candles:
short_candle = candles[j]['candles'][i]
if i != 0:
previous_short_candle = candles[j]['candles'][i - 1]
short_candle = _get_fixed_jumped_candle(previous_short_candle, short_candle)
exchange = candles[j]['exchange']
symbol = candles[j]['symbol']
# add candles
for j in candles:
short_candle = candles[j]['candles'][i]
if i != 0:
previous_short_candle = candles[j]['candles'][i - 1]
short_candle = _get_fixed_jumped_candle(previous_short_candle, short_candle)
exchange = candles[j]['exchange']
symbol = candles[j]['symbol']

store.candles.add_candle(short_candle, exchange, symbol, '1m', with_execution=False,
with_generation=False)
store.candles.add_candle(short_candle, exchange, symbol, '1m', with_execution=False,
with_generation=False)

# print short candle
if jh.is_debuggable('shorter_period_candles'):
print_candle(short_candle, True, symbol)
# print short candle
if jh.is_debuggable('shorter_period_candles'):
print_candle(short_candle, True, symbol)

_simulate_price_change_effect(short_candle, exchange, symbol)
_simulate_price_change_effect(short_candle, exchange, symbol)

# generate and add candles for bigger timeframes
for timeframe in config['app']['considering_timeframes']:
# for 1m, no work is needed
if timeframe == '1m':
continue
# generate and add candles for bigger timeframes
for timeframe in config['app']['considering_timeframes']:
# for 1m, no work is needed
if timeframe == '1m':
continue

count = jh.timeframe_to_one_minutes(timeframe)
# until = count - ((i + 1) % count)
count = jh.timeframe_to_one_minutes(timeframe)
until = count - ((i + 1) % count)

if (i + 1) % count == 0:
generated_candle = generate_candle_from_one_minutes(
timeframe,
candles[j]['candles'][(i - (count - 1)):(i + 1)])
store.candles.add_candle(generated_candle, exchange, symbol, timeframe, with_execution=False,
with_generation=False)
if (i + 1) % count == 0:
generated_candle = generate_candle_from_one_minutes(
timeframe,
candles[j]['candles'][(i - (count - 1)):(i + 1)])
store.candles.add_candle(generated_candle, exchange, symbol, timeframe, with_execution=False,
with_generation=False)

# update progressbar
if not run_silently and i % 60 == 0:
progressbar.update()
sync_publish('progressbar', {
'current': progressbar.current,
'estimated_remaining_seconds': progressbar.estimated_remaining_seconds
})
# update progressbar
if not jh.is_debugging() and not jh.should_execute_silently() and i % 60 == 0:
progressbar.update(60)

# now that all new generated candles are ready, execute
for r in router.routes:
count = jh.timeframe_to_one_minutes(r.timeframe)
# 1m timeframe
if r.timeframe == timeframes.MINUTE_1:
r.strategy._execute()
elif (i + 1) % count == 0:
# print candle
if jh.is_debuggable('trading_candles'):
print_candle(store.candles.get_current_candle(r.exchange, r.symbol, r.timeframe), False,
r.symbol)
r.strategy._execute()
# now that all new generated candles are ready, execute
for r in router.routes:
count = jh.timeframe_to_one_minutes(r.timeframe)
# 1m timeframe
if r.timeframe == timeframes.MINUTE_1:
r.strategy._execute()
elif (i + 1) % count == 0:
# print candle
if jh.is_debuggable('trading_candles'):
print_candle(store.candles.get_current_candle(r.exchange, r.symbol, r.timeframe), False,
r.symbol)
r.strategy._execute()

# now check to see if there's any MARKET orders waiting to be executed
store.orders.execute_pending_market_orders()
# now check to see if there's any MARKET orders waiting to be executed
store.orders.execute_pending_market_orders()

if i != 0 and i % 1440 == 0:
save_daily_portfolio_balance()
if i != 0 and i % 1440 == 0:
save_daily_portfolio_balance()

if not jh.should_execute_silently():
if jh.is_debuggable('trading_candles') or jh.is_debuggable('shorter_period_candles'):
print('\n')

if not run_silently:
# print executed time for the backtest session
finish_time_track = time.time()
sync_publish('alert', {
'message': f'Successfully executed backtest simulation in: {round(finish_time_track - begin_time_track, 2)} seconds',
'type': 'success'
})
print('Executed backtest simulation in: ', f'{round(finish_time_track - begin_time_track, 2)} seconds')

for r in router.routes:
r.strategy._terminate()
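The simulator above drives everything off 1-minute candles and only materializes a bigger-timeframe candle every `count` minutes via `generate_candle_from_one_minutes`. Below is a hedged re-implementation of what that aggregation amounts to for Jesse's `[timestamp, open, close, high, low, volume]` candle layout; the real helper may differ in details, this is only meant to illustrate the collapse of a block of 1m candles into one larger candle.

```python
import numpy as np


def aggregate_one_minutes(candles: np.ndarray) -> np.ndarray:
    return np.array([
        candles[0][0],        # timestamp of the first 1m candle
        candles[0][1],        # open of the first
        candles[-1][2],       # close of the last
        candles[:, 3].max(),  # highest high
        candles[:, 4].min(),  # lowest low
        candles[:, 5].sum(),  # total volume
    ])


ones = np.array([
    [0, 100.0, 101.0, 102.0, 99.0, 5.0],
    [60_000, 101.0, 103.0, 104.0, 100.5, 7.0],
    [120_000, 103.0, 102.0, 103.5, 101.0, 4.0],
])
print(aggregate_one_minutes(ones))  # one 3-minute candle
```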
@@ -1,167 +0,0 @@
import json
import os
import numpy as np
import peewee

from fastapi.responses import FileResponse
import jesse.helpers as jh


def get_candles(exchange: str, symbol: str, timeframe: str):
from jesse.services.db import database
database.open_connection()

from jesse.services.candle import generate_candle_from_one_minutes
from jesse.models.utils import fetch_candles_from_db

symbol = symbol.upper()
num_candles = 210

one_min_count = jh.timeframe_to_one_minutes(timeframe)
finish_date = jh.now(force_fresh=True)
start_date = finish_date - (num_candles * one_min_count * 60_000)

# fetch from database
candles = np.array(
fetch_candles_from_db(exchange, symbol, start_date, finish_date)
)

if timeframe != '1m':
generated_candles = [generate_candle_from_one_minutes(
timeframe,
candles[(i - (one_min_count - 1)):(i + 1)],
True
) for i in range(len(candles)) if (i + 1) % one_min_count == 0]
candles = generated_candles

database.close_connection()

return [
{
'time': int(c[0] / 1000),
'open': c[1],
'close': c[2],
'high': c[3],
'low': c[4],
'volume': c[5],
} for c in candles
]


def get_general_info(has_live=False) -> dict:
from jesse.modes.import_candles_mode.drivers import drivers

if has_live:
from jesse_live.info import SUPPORTED_EXCHANGES_NAMES
live_exchanges = list(sorted(SUPPORTED_EXCHANGES_NAMES))
else:
live_exchanges = []

exchanges = list(sorted(drivers.keys()))
strategies_path = os.getcwd() + "/strategies/"
strategies = list(sorted([name for name in os.listdir(strategies_path) if os.path.isdir(strategies_path + name)]))

return {
'exchanges': exchanges,
'live_exchanges': live_exchanges,
'strategies': strategies,
'has_live_plugin_installed': has_live,
'cpu_cores': jh.cpu_cores_count()
}


def get_config(client_config: dict, has_live=False) -> dict:
from jesse.services.db import database
database.open_connection()

from jesse.models.Option import Option

try:
o = Option.get(Option.type == 'config')

# merge it with client's config (because it could include new keys added),
# update it in the database, and then return it
data = jh.merge_dicts(client_config, json.loads(o.json))

# make sure the list of BACKTEST exchanges is up to date
from jesse.modes.import_candles_mode.drivers import drivers
for k in list(data['backtest']['exchanges'].keys()):
if k not in drivers:
del data['backtest']['exchanges'][k]

# make sure the list of LIVE exchanges is up to date
if has_live:
from jesse_live.info import SUPPORTED_EXCHANGES_NAMES
live_exchanges = list(sorted(SUPPORTED_EXCHANGES_NAMES))
for k in list(data['live']['exchanges'].keys()):
if k not in live_exchanges:
del data['live']['exchanges'][k]

# fix the settlement_currency of exchanges
for k, e in data['live']['exchanges'].items():
e['settlement_currency'] = jh.get_settlement_currency_from_exchange(e['name'])
for k, e in data['backtest']['exchanges'].items():
e['settlement_currency'] = jh.get_settlement_currency_from_exchange(e['name'])

o.updated_at = jh.now()
o.save()
except peewee.DoesNotExist:
# if not found, that means it's the first time. Store in the DB and
# then return what was sent from the client side without changing it
o = Option({
'id': jh.generate_unique_id(),
'updated_at': jh.now(),
'type': 'config',
'json': json.dumps(client_config)
})
o.save(force_insert=True)

data = client_config

database.close_connection()

return {
'data': data
}


def update_config(client_config: dict):
from jesse.services.db import database
database.open_connection()

from jesse.models.Option import Option

# at this point there must already be one option record for "config" existing, so:
o = Option.get(Option.type == 'config')

o.json = json.dumps(client_config)
o.updated_at = jh.now()

o.save()

database.close_connection()


def download_file(mode: str, file_type: str, session_id: str):
if mode == 'backtest' and file_type == 'log':
path = f'storage/logs/backtest-mode/{session_id}.txt'
filename = f'backtest-{session_id}.txt'
elif mode == 'backtest' and file_type == 'chart':
path = f'storage/charts/{session_id}.png'
filename = f'backtest-{session_id}.png'
elif mode == 'backtest' and file_type == 'csv':
path = f'storage/csv/{session_id}.csv'
filename = f'backtest-{session_id}.csv'
elif mode == 'backtest' and file_type == 'json':
path = f'storage/json/{session_id}.json'
filename = f'backtest-{session_id}.json'
elif mode == 'backtest' and file_type == 'full-reports':
path = f'storage/full-reports/{session_id}.html'
filename = f'backtest-{session_id}.html'
elif mode == 'backtest' and file_type == 'tradingview':
path = f'storage/trading-view-pine-editor/{session_id}.txt'
filename = f'backtest-{session_id}.txt'
else:
raise Exception(f'Unknown file type: {file_type} or mode: {mode}')

return FileResponse(path=path, filename=filename, media_type='application/octet-stream')
@@ -1,13 +1,11 @@
import math
import threading
import time
from datetime import timedelta
from typing import Dict, List, Any, Union

import arrow
import click
import pydash
from timeloop import Timeloop

import jesse.helpers as jh
from jesse.exceptions import CandleNotFoundInExchange
@@ -15,36 +13,9 @@ from jesse.models import Candle
from jesse.modes.import_candles_mode.drivers import drivers
from jesse.modes.import_candles_mode.drivers.interface import CandleExchange
from jesse.services.db import store_candles
from jesse.config import config
from jesse.services.failure import register_custom_exception_handler
from jesse.services.redis import sync_publish, process_status
from jesse.store import store
from jesse import exceptions
from jesse.services.progressbar import Progressbar


def run(exchange: str, symbol: str, start_date_str: str, skip_confirmation: bool = False, mode: str = 'candles') -> None:
config['app']['trading_mode'] = mode

# first, create and set session_id
store.app.set_session_id()

register_custom_exception_handler()

# close database connection
from jesse.services.db import database
database.open_connection()

# at every second, we check to see if it's time to execute stuff
status_checker = Timeloop()

@status_checker.job(interval=timedelta(seconds=1))
def handle_time():
if process_status() != 'started':
raise exceptions.Termination

status_checker.start()

def run(exchange: str, symbol: str, start_date_str: str, skip_confirmation: bool = False) -> None:
try:
start_timestamp = jh.arrow_to_timestamp(arrow.get(start_date_str, 'YYYY-MM-DD'))
except:
@@ -81,99 +52,78 @@ def run(exchange: str, symbol: str, start_date_str: str, skip_confirmation: bool
click.confirm(
f'Importing {days_count} days candles from "{exchange}" for "{symbol}". Duplicates will be skipped. All good?', abort=True, default=True)

progressbar = Progressbar(loop_length)
for i in range(candles_count):
temp_start_timestamp = start_date.int_timestamp * 1000
temp_end_timestamp = temp_start_timestamp + (driver.count - 1) * 60000
with click.progressbar(length=loop_length, label='Importing candles...') as progressbar:
for _ in range(candles_count):
temp_start_timestamp = start_date.int_timestamp * 1000
temp_end_timestamp = temp_start_timestamp + (driver.count - 1) * 60000

# to make sure it won't try to import candles from the future! LOL
if temp_start_timestamp > jh.now_to_timestamp():
break
# to make sure it won't try to import candles from the future! LOL
if temp_start_timestamp > jh.now_to_timestamp():
break

# prevent duplicates calls to boost performance
count = Candle.select().where(
Candle.timestamp.between(temp_start_timestamp, temp_end_timestamp),
Candle.symbol == symbol,
Candle.exchange == exchange
).count()
already_exists = count == driver.count
# prevent duplicates calls to boost performance
count = Candle.select().where(
Candle.timestamp.between(temp_start_timestamp, temp_end_timestamp),
Candle.symbol == symbol,
Candle.exchange == exchange
).count()
already_exists = count == driver.count

if not already_exists:
# it's today's candles if temp_end_timestamp < now
if temp_end_timestamp > jh.now_to_timestamp():
temp_end_timestamp = arrow.utcnow().floor('minute').int_timestamp * 1000 - 60000
if not already_exists:
# it's today's candles if temp_end_timestamp < now
if temp_end_timestamp > jh.now_to_timestamp():
temp_end_timestamp = arrow.utcnow().floor('minute').int_timestamp * 1000 - 60000

# fetch from market
candles = driver.fetch(symbol, temp_start_timestamp)
# fetch from market
candles = driver.fetch(symbol, temp_start_timestamp)

# check if candles have been returned and check those returned start with the right timestamp.
# Sometimes exchanges just return the earliest possible candles if the start date doesn't exist.
if not len(candles) or arrow.get(candles[0]['timestamp'] / 1000) > start_date:
click.clear()
first_existing_timestamp = driver.get_starting_time(symbol)
# check if candles have been returned and check those returned start with the right timestamp
if not len(candles) or arrow.get(candles[0]['timestamp'] / 1000) > start_date:
click.clear()
first_existing_timestamp = driver.get_starting_time(symbol)

# if driver can't provide accurate get_starting_time()
if first_existing_timestamp is None:
raise CandleNotFoundInExchange(
f'No candles exists in the market for this day: {jh.timestamp_to_time(temp_start_timestamp)[:10]} \n'
'Try another start_date'
)

# handle when there's missing candles during the period
if temp_start_timestamp > first_existing_timestamp:
# see if there are candles for the same date for the backup exchange,
# if so, get those, if not, download from that exchange.
if driver.backup_exchange is not None:
candles = _get_candles_from_backup_exchange(
exchange, driver.backup_exchange, symbol, temp_start_timestamp, temp_end_timestamp
# if driver can't provide accurate get_starting_time()
if first_existing_timestamp is None:
raise CandleNotFoundInExchange(
f'No candles exists in the market for this day: {jh.timestamp_to_time(temp_start_timestamp)[:10]} \n'
'Try another start_date'
)

# handle when there's missing candles during the period
if temp_start_timestamp > first_existing_timestamp:
# see if there are candles for the same date for the backup exchange,
# if so, get those, if not, download from that exchange.
if driver.backup_exchange is not None:
candles = _get_candles_from_backup_exchange(
exchange, driver.backup_exchange, symbol, temp_start_timestamp, temp_end_timestamp
)

else:
if not skip_confirmation:
print(jh.color(f'No candle exists in the market for {jh.timestamp_to_time(temp_start_timestamp)[:10]}\n', 'yellow'))
click.confirm(
f'First present candle is since {jh.timestamp_to_time(first_existing_timestamp)[:10]}. Would you like to continue?', abort=True, default=True)

run(exchange, symbol, jh.timestamp_to_time(first_existing_timestamp)[:10], True)
return

# fill absent candles (if there's any)
candles = _fill_absent_candles(candles, temp_start_timestamp, temp_end_timestamp)

# store in the database
if skip_confirmation:
store_candles(candles)
else:
temp_start_time = jh.timestamp_to_time(temp_start_timestamp)[:10]
temp_existing_time = jh.timestamp_to_time(first_existing_timestamp)[:10]
sync_publish('alert', {
'message': f'No candle exists in the market for {temp_start_time}. So '
f'Jesse started importing since the first existing date which is {temp_existing_time}',
'type': 'success'
})
run(exchange, symbol, jh.timestamp_to_time(first_existing_timestamp)[:10], True)
return
threading.Thread(target=store_candles, args=[candles]).start()

# fill absent candles (if there's any)
candles = _fill_absent_candles(candles, temp_start_timestamp, temp_end_timestamp)
# add as much as driver's count to the temp_start_time
start_date = start_date.shift(minutes=driver.count)

# store in the database
if skip_confirmation:
store_candles(candles)
else:
threading.Thread(target=store_candles, args=[candles]).start()
progressbar.update(1)

# add as much as driver's count to the temp_start_time
start_date = start_date.shift(minutes=driver.count)

progressbar.update()
sync_publish('progressbar', {
'current': progressbar.current,
'estimated_remaining_seconds': progressbar.estimated_remaining_seconds
})

# sleep so that the exchange won't get angry at us
if not already_exists:
time.sleep(driver.sleep_time)

# stop the status_checker time loop
status_checker.stop()

sync_publish('alert', {
'message': f'Successfully imported candles since {jh.timestamp_to_date(start_timestamp)} until today ({days_count} days). ',
'type': 'success'
})

# if it is to skip, then it's being called from another process hence we should leave the database be
if not skip_confirmation:
# close database connection
from jesse.services.db import database
database.close_connection()
# sleep so that the exchange won't get angry at us
if not already_exists:
time.sleep(driver.sleep_time)


def _get_candles_from_backup_exchange(exchange: str, backup_driver: CandleExchange, symbol: str, start_timestamp: int,
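The duplicate-prevention check in the import loop above counts how many candles of the current batch are already stored and skips the exchange call when the whole batch exists. A small sketch of that idea, using a hypothetical `count_stored(start_ts, end_ts)` callable in place of the real peewee query:

```python
def should_fetch(count_stored, start_ts: int, batch_size: int) -> bool:
    # each batch covers batch_size consecutive 1-minute candles
    end_ts = start_ts + (batch_size - 1) * 60_000
    already_exists = count_stored(start_ts, end_ts) == batch_size
    return not already_exists


# e.g. with a driver that pulls 1440 one-minute candles per request:
# if should_fetch(count_stored, temp_start_timestamp, 1440):
#     candles = driver.fetch(symbol, temp_start_timestamp)
```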
@@ -208,7 +158,8 @@ def _get_candles_from_backup_exchange(exchange: str, backup_driver: CandleExchan
# try fetching from market now
days_count = jh.date_diff_in_days(jh.timestamp_to_arrow(start_timestamp), jh.timestamp_to_arrow(end_timestamp))
# make sure it's rounded up so that we import maybe more candles, but not less
days_count = max(days_count, 1)
if days_count < 1:
days_count = 1
if type(days_count) is float and not days_count.is_integer():
days_count = math.ceil(days_count)
candles_count = days_count * 1440
@@ -285,8 +236,9 @@ def _get_candles_from_backup_exchange(exchange: str, backup_driver: CandleExchan
return total_candles


def _fill_absent_candles(temp_candles: List[Dict[str, Union[str, Any]]], start_timestamp: int, end_timestamp: int) -> List[Dict[str, Union[str, Any]]]:
if not temp_candles:
def _fill_absent_candles(temp_candles: List[Dict[str, Union[str, Any]]], start_timestamp: int, end_timestamp: int) -> \
List[Dict[str, Union[str, Any]]]:
if len(temp_candles) == 0:
raise CandleNotFoundInExchange(
f'No candles exists in the market for this day: {jh.timestamp_to_time(start_timestamp)[:10]} \n'
'Try another start_date'
@@ -299,7 +251,8 @@ def _fill_absent_candles(temp_candles: List[Dict[str, Union[str, Any]]], start_t
started = False
loop_length = ((end_timestamp - start_timestamp) / 60000) + 1

for _ in range(int(loop_length)):
i = 0
while i < loop_length:
candle_for_timestamp = pydash.find(
temp_candles, lambda c: c['timestamp'] == start_timestamp)

@@ -335,6 +288,8 @@ def _fill_absent_candles(temp_candles: List[Dict[str, Union[str, Any]]], start_t
candles.append(candle_for_timestamp)

start_timestamp += 60000
i += 1

return candles
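Finally, a hedged, stand-alone sketch of the gap-filling idea behind `_fill_absent_candles` above: walk the period minute by minute and, whenever no candle exists for a timestamp, synthesize a flat one from the previous close with zero volume. Jesse's real implementation differs in details (for example it also back-fills a missing leading candle, which this sketch simply skips).

```python
from typing import Dict, List


def fill_absent_candles(candles: List[Dict], start_ts: int, end_ts: int) -> List[Dict]:
    by_ts = {c['timestamp']: c for c in candles}
    filled: List[Dict] = []
    previous = None
    ts = start_ts
    while ts <= end_ts:
        candle = by_ts.get(ts)
        if candle is None and previous is not None:
            # synthesize a flat candle from the previous close
            candle = {
                'timestamp': ts,
                'open': previous['close'], 'close': previous['close'],
                'high': previous['close'], 'low': previous['close'],
                'volume': 0,
            }
        if candle is not None:
            filled.append(candle)
            previous = candle
        ts += 60_000
    return filled
```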