diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..e69de29 diff --git a/market_trade/core/CoreDraw.py b/market_trade/core/CoreDraw.py index f7b81ed..d764c83 100644 --- a/market_trade/core/CoreDraw.py +++ b/market_trade/core/CoreDraw.py @@ -15,7 +15,7 @@ import datetime import matplotlib.dates as mdates import matplotlib.pyplot as plt -import mplfinance as mpf +#import mplfinance as mpf import plotly #import plotly.plotly as py @@ -25,7 +25,7 @@ from plotly.offline import init_notebook_mode, iplot from plotly.subplots import make_subplots init_notebook_mode() -import market_trade.core.CoreTraidMath +import CoreTraidMath import plotly.express as px @@ -79,9 +79,10 @@ class corePlt(): class coreDraw(): - def __init__(self, data=[],needShow=False): + def __init__(self, data=[],needShow=False,subplot_titles={}): self.data=self.getPlts(data) self.needShow=needShow + self.subplot_titles=subplot_titles self.ans=self.getAns() @@ -156,11 +157,11 @@ class coreDraw(): rows=maxRow, cols=maxCol, shared_xaxes=True, - vertical_spacing=0.02, + vertical_spacing=0.1, shared_yaxes=True, - horizontal_spacing=0.02, + #horizontal_spacing=0.02, #column_widths=[] - + subplot_titles=self.subplot_titles ) @@ -188,7 +189,7 @@ class coreDraw(): except: colorType='normal' colors=self.getBarColorList(i.df[j],colorType) - fig.add_trace(go.Bar(x=i.df['date'], y=i.df[j],name=j,marker_color=colors)) + fig.add_trace(go.Bar(x=i.df['date'], y=i.df[j],name=j,marker_color=colors),row=i.row, col=i.col) @@ -196,4 +197,4 @@ class coreDraw(): ans=fig if self.needShow: plotly.offline.iplot(fig) - return ans + return ans \ No newline at end of file diff --git a/market_trade/core/CoreTraidMath.py b/market_trade/core/CoreTraidMath.py index 9f906c4..9aad5b6 100644 --- a/market_trade/core/CoreTraidMath.py +++ b/market_trade/core/CoreTraidMath.py @@ -1,93 +1,102 @@ + +import pandas as pd +import datetime import numpy as np +import plotly as pl +import plotly.graph_objs as go +import matplotlib.pyplot as plt +import math +import scipy +import random +import statistics + + +import datetime + + + class CoreMath: - - def __init__(self, base_df, params=None): - """ - Этот класс нужен для того, чтобы проводить операции над датафреймами - :param base_df: pandas.DataFrame , датафрейм, над которым будут проведены математические операции - :param params: словарь, который определяет какие данные пришли в класс, и что с ними нужно делать, и как - """ - if params is None: - params = { - 'dataType': 'ohcl', - 'action': None, - 'actionOptions': {} - } - - # нужно переопределить индексы, потому что нам ничего не известно об индексации входного файла - self.base_df = base_df.reset_index(drop=True) - - self.params = params - - # Эта часть определяет с какой частью данных нужно проводить вычисления + + def __init__(self, base_df, params={ + 'dataType':'ohcl', + 'action': None, + 'actionOptions':{} + } + ): + + self.base_df=base_df.reset_index(drop=True) + self.params=params if self.params['dataType']=='ohcl': self.col=self.base_df[self.params['actionOptions']['valueType']] elif self.params['dataType']=='series': self.col=self.base_df - - # собственно производим вычисления self.ans=self.getAns() - + + def getAns(self): - """ - Эта функция занимается собственно рутингом вычислений, в зависимости от параметров - :return: ans, неопределенный тип данных, в заивисимости от action - """ ans=None - - # в зависимости от параметра action производятся соответсвующие действия + if self.params['action']=='findExt': ans = 
self.getExtremumValue() elif self.params['action']=='findMean': ans = self.getMeanValue() elif self.params['action']=='findSTD': - ans = self.getSTD() - + ans=self.getSTD() + + return ans - - + + def getExtremumValue(self): - """ - Эта функция возвращает экстремум произвольного типа внутри одного столбца - Тип контролируется разделом внутри словаря параметров `self.params` по ключу `actionOptions`: - 'extremumtype': -- тип экстремума + ans=None + ''' + actionOptions: + 'extremumtype': 'min' 'max' - :return ans, экстремум произвольного типа - """ - ans=None + 'valueType': + 'open' + 'close' + 'high' + 'low' + ''' if self.params['actionOptions']['extremumtype']=='max': ans=max(self.col) - + if self.params['actionOptions']['extremumtype']=='min': ans=min(self.col) + return ans - + + def getMeanValue(self): - """ - Божественный код - Эта функция возвращает среднее значение одного из следующих типов. - Для определения типа используется словарь `self.params`, по ключу `actionOptions`, релевантные ключи выглядят - так: + ''' + actionOptions: 'MeanType': - 'MA' -- среднее по всему столбцу - 'SMA' -- скользящее среднее - 'EMA' -- экспоненциальное скользящее среднее - 'WMA' -- взвешенное скользящее среднее - 'window' -- размер окна - 'span' -- >=1 , аналог окна для экспоненциального среднего, чем он больше тем меньше коэффициент сглаживания - 'weights' -- numpy.ndarray, список размером в параметр `window`, конкретные веса для каждого элемента - """ - + 'MA' + 'SMA' + 'EMA' + 'WMA' + --'SMMA' + 'valueType': + 'open' + 'close' + 'high' + 'low' + 'window' + 'span' + 'weights' + ''' ans=None - if self.params['actionOptions']['MeanType']=='MA': ans = self.col.mean() if self.params['actionOptions']['MeanType']=='SMA': ans=np.convolve(self.col, np.ones(self.params['actionOptions']['window']), 'valid') / self.params['actionOptions']['window'] + #ans=self.col.rolling(window=self.params['actionOptions']['window']).mean().to_list() + if self.params['actionOptions']['MeanType']=='EMA': ans=self.col.ewm(span=self.params['actionOptions']['span'], adjust=False).mean().to_list() if self.params['actionOptions']['MeanType']=='WMA': @@ -97,8 +106,10 @@ class CoreMath: weights=np.arange(1,self.params['actionOptions']['window']+1) ans=self.col.rolling(window=self.params['actionOptions']['window']).apply(lambda x: np.sum(weights*x) / weights.sum(), raw=False).to_list() - return ans - + + + return(ans) + def getSTD(self): ''' actionOptions: @@ -109,7 +120,7 @@ class CoreMath: ''' - + ans=None @@ -117,11 +128,11 @@ class CoreMath: window=self.params['actionOptions']['window'] ans=np.asarray([]) for i in range(len(self.col)-window+1): - ans=np.append(ans, np.std(self.col[i:i+window], ddof=1)) - + ans=np.append(ans,np.std(self.col[i:i+window], ddof=1)) + except: #window = len(self.col) ans=np.std(self.col, ddof=1) return ans - + \ No newline at end of file diff --git a/market_trade/core/dealManager.py b/market_trade/core/dealManager.py new file mode 100644 index 0000000..bc47a9f --- /dev/null +++ b/market_trade/core/dealManager.py @@ -0,0 +1,49 @@ +import pandas as pd +import datetime +import numpy as np +import uuid + +class DealManager(): + + def __init__(self): + #self.commission=0.04 + self.columns=['uuid','figi','amount','startPrice'] + self.deals = pd.DataFrame(columns=self.columns) + self.deals = self.deals.set_index('uuid') + + def findDealByPriceAndFig(self,price,figi): + ans=None + for i in range(self.deals.shape[0]): + if self.deals.iloc[i].startPrice == price and self.deals.iloc[i].figi == figi: + ans = 
self.deals.iloc[i].name + break + return ans + + def openDeal(self,figi,startPrice,amount=1): + desiredDeal=self.findDealByPriceAndFig(startPrice,figi) + if desiredDeal == None: + newDealDict={ + 'uuid':[str(uuid.uuid4())], + 'figi':[figi], + 'startPrice':[startPrice], + 'amount':[amount] + } + + #newDealDict['profit']=[startPrice*pow(1+self.commission,2)] + + + + newDeal=pd.DataFrame.from_dict(newDealDict).set_index('uuid') + self.deals=pd.concat([self.deals, newDeal]) + else: + self.deals.at[desiredDeal,'amount'] += amount + + def closeDeal(self,uuid,amount): + + desiredDeal=self.deals.loc[uuid] + if desiredDeal.amount - amount == 0: + self.deals = self.deals.drop(labels = [uuid],axis = 0) + else: + self.deals.at[uuid,'amount'] -= amount + #self.deals.loc[uuid].amount = desiredDeal.amount - amount + \ No newline at end of file diff --git a/market_trade/core/decisionManager.py b/market_trade/core/decisionManager.py new file mode 100644 index 0000000..2ae438a --- /dev/null +++ b/market_trade/core/decisionManager.py @@ -0,0 +1,116 @@ +import pandas as pd +import datetime +import numpy as np + +import pickle +from signals import * +from dealManager import * +from trandeVoter import * +from riskManager import riskManager + + +class decsionManager(): + + + def __init__(self,name): + self.name = name + self.RM = riskManager() + self.DM = DealManager() + self.TV = trandeVoter(name) + self.SA = signalAgrigator() + pass + + #вытащенный из signalAgrigator метод теста для сигналов + def getSignalTest(self,data: pd.DataFrame(),reqSig: dict, batchSize=30, dataType='candel') -> dict: + + self.SA.mode = 'retroFast' + t.SA.createSingnalInstances( + data = data, + dictAgrigSignal = reqSig, + dataType='candel', + batchSize=30 + ) + ans = t.SA.getAns(data) + return ans + + #метод для генерации матрицы вероятностей. + def generateMatrixProbability(self, + data: pd.DataFrame(), + reqSig: dict, + target: str, + batchSize=30, + #dataType='candel' + ): + data=data.reset_index(drop=True) + + t.SA.createSingnalInstances( + data = data, + dictAgrigSignal = reqSig, + dataType='candel', + batchSize=batchSize + ) + self.TV.createMatrixAmounts(reqSig.keys()) + for i in range(data.shape[0]-batchSize-1): + sigAns=self.SA.getAns(data[i:i+batchSize]) + rightAns=self.getRetroStepAns(data[target][i],data[target][i+1]) + self.TV.setDecisionBySignals(self.KostilEbaniy(sigAns),rightAns) + self.TV.generateMatrixProbability() + + #без коментариев блять + def KostilEbaniy(self,d): + ans={} + for i in d.keys(): + if d[i] == 0: + ans[i] = 'none' + elif d[i] == 1: + ans[i] = 'up' + elif d[i] == -1: + ans[i] = 'down' + return ans + + #тож понятная хуита + def getRetroStepAns(self, value1,value2): + + if value1 == value2: + ans = 'none' + elif value1 < value2: + ans = 'up' + else: + ans = 'down' + return ans + + #метод для онлай получения решения по сигналу + def getSignal(self,data: pd.DataFrame(),reqSig: dict, dataType='candel') -> dict: + data=data.reset_index(drop=True) + self.SA.mode = 'online' + t.SA.createSingnalInstances( + data = data, + dictAgrigSignal = reqSig, + dataType='candel', + batchSize=30 + ) + ans = t.SA.getAns(data) + return ans + + + #Создание сигналов. 
Вызывать перед getOnlineAns + def crateSignals(self,data: pd.DataFrame(),reqSig: dict, dataType='candel'): + data=data.reset_index(drop=True) + self.SA.mode = 'online' + t.SA.createSingnalInstances( + data = data, + dictAgrigSignal = reqSig, + dataType='candel', + batchSize=30 + ) + + + def getOnlineAns(self,data: pd.DataFrame(),price): + sigAns = self.SA.getAns(data) + prob = self.TV.getDecisionBySignals(sigAns) + ans = self.RM.getDecision(sigAns,prob,price) + return ans + + + + diff --git a/market_trade/core/decisionManager_v2.py b/market_trade/core/decisionManager_v2.py new file mode 100644 index 0000000..e609ab1 --- /dev/null +++ b/market_trade/core/decisionManager_v2.py @@ -0,0 +1,161 @@ +import os + +import pandas as pd +import datetime +import numpy as np + +from tqdm import tqdm + +from indicators_v2 import * +from signals_v2 import * +from dealManager import * +from trandeVoter import * +from riskManager import * +import pickle + + +class decsionManager: + ''' +sigAgrReq = { + 'sig_BB':{ + 'className':sig_BB, + 'params':{'source':'close','target':'close'}, + 'indicators':{ + 'ind_BB':{ + 'className':ind_BB, + 'params':{'MeanType':'SMA','window':30,'valueType':'close','kDev':2.5} + } + } + }, + 'sig_BB_2':{ + 'className':sig_BB, + 'params':{'source':'close','target':'close'}, + 'indicators':{ + 'ind_BB':{ + 'className':ind_BB, + 'params':{'MeanType':'SMA','window':30,'valueType':'close','kDev':2} + } + } + } +} + +sigAgrData = { + 'sig_BB':{ + 'signalData': df_candle[990:1000], + 'indicatorData' :{'ind_BB': df_candle[:1000]} + }, + 'sig_BB_2':{ + 'signalData': df_candle[990:1000], + 'indicatorData' :{'ind_BB': df_candle[:1000]} + } +} + + +sigAgrRetroTemplate = { + 'sig_BB':{ + 'signalData': None, + 'indicatorData' :{'ind_BB': None} + }, + 'sig_BB_2':{ + 'signalData': None, + 'indicatorData' :{'ind_BB': None} + } +} + + + + + + + ''' + + + + + def __init__(self,name, sigDict: dict): + self.RM = riskManager() + self.DM = DealManager() + self.TV = trandeVoter(name) + self.SA = signalsAgrigator(sigDict) + self.sigDict = sigDict + + + def getOnlineAns(self, signalsAns: dict, price: float) -> dict: + probabilityDecsion = self.TV.getDecisionBySignals(self.getSignalsAns(signalsAns)) + RMD = self.RM.getDecision(probabilityDecision=probabilityDecsion, price=price, deals = self.DM.deals) + return RMD + + def getSignalsAns(self, signalsDataDict: dict) -> dict: + return self.SA.getAns(signalsDataDict) + + def getRightAns(self,value_1, value_2): + + ans='' + + if value_1 > value_2: + ans = 'down' + elif value_1 < value_2: + ans = 'up' + else: + ans = 'none' + + return ans + + def getRetroTrendAns(self, retroTemplateDict: dict, data: pd.DataFrame(), window: int) -> list: + + reqSig={} + ans = { + 'signalsAns':[], + 'rightAns':[] + + } + target = '' + + + for k in tqdm(range(data.shape[0]-window-1)): + for i in retroTemplateDict.keys(): + reqSig[i] = {'signalData': data[k:k+window], 'indicatorData':{}} + target = self.SA.signals[i].params['target'] + for j in retroTemplateDict[i]['indicatorData'].keys(): + reqSig[i]['indicatorData'][j] = data[k:k+window] + + sigAns = self.getSignalsAns(reqSig) + rightAns = self.getRightAns(data[target][k], data[target][k+1]) + + ans['signalsAns'].append(sigAns) + ans['rightAns'].append(rightAns) + + return ans + + + def generateMatrixProbabilityFromDict(self, dictSignals: dict) -> dict: + self.TV.createMatrixAmounts(dictSignals['signalsAns'][0].keys()) + for i in range(len(dictSignals['signalsAns'])): + self.TV.setDecisionBySignals(signalDecisions = 
dictSignals['signalsAns'][i], + trande = dictSignals['rightAns'][i]) + self.TV.generateMatrixProbability() + + def createDump(self,postfix='') -> str: + dataDict = { + 'RM':self.RM, + 'DM':self.DM, + 'TV':self.TV, + 'SA':self.SA, + 'sigDict':self.sigDict + } + fileName='data_'+postfix+'.pickle' + with open(fileName, 'wb') as f: + pickle.dump(dataDict, f) + + return os.path.abspath(fileName) + + def loadDump(self,path: str) -> None: + + with open(path, 'rb') as f: + dataDict = pickle.load(f) + + self.RM = dataDict['RM'] + self.DM = dataDict['DM'] + self.TV = dataDict['TV'] + self.SA = dataDict['SA'] + self.sigDict = dataDict['sigDict'] \ No newline at end of file diff --git a/market_trade/core/indicators.py b/market_trade/core/indicators.py index 7294d36..ade4d59 100644 --- a/market_trade/core/indicators.py +++ b/market_trade/core/indicators.py @@ -2,8 +2,8 @@ import pandas as pd import datetime import numpy as np -import market_trade.core.CoreTraidMath as CoreTraidMath -import market_trade.core.CoreDraw as CoreDraw +import CoreTraidMath +import CoreDraw class coreIndicator(): def __init__(self, diff --git a/market_trade/core/indicators_v2.py b/market_trade/core/indicators_v2.py new file mode 100644 index 0000000..d4103c8 --- /dev/null +++ b/market_trade/core/indicators_v2.py @@ -0,0 +1,89 @@ +import pandas as pd +import datetime +import numpy as np + +import CoreTraidMath + +class coreIndicator(): + + def __init__(self,options: dict, dataType: str = None, predictType: str = None, name: str = None): + self.options = options + self.dataType = dataType #ochl + self.predictType = predictType #trend + + + def getAns(self, data: pd.DataFrame() ): + return "ERROR" + +class indicatorsAgrigator: + """ + indicators = { + 'ind_BB':{ + 'className':ind_BB, + 'params':{'MeanType':'SMA','window':15,'valueType':'close','kDev':2.5} + } + } + dataDic={ + 'ind_BB':df_candle[:1000] + } + + + """ + + def __init__ (self,indDict={}): + self.indDict = indDict + self.indInst = {} + self.ans={} + self.createIndicatorsInstance() + + def createIndicatorsInstance(self): + for i in self.indDict.keys(): + self.indInst[i]=self.indDict[i]['className'](self.indDict[i]['params']) + + def getAns(self,dataDict={}): + ans={} + for i in dataDict.keys(): + ans[i] = self.indInst[i].getAns(dataDict[i]) + return ans + +class ind_BB(coreIndicator): + """ + options + MeanType -> SMA + window -> int + valueType -> str: low, high, open, close + kDev -> float + + """ + + def __init__(self,options: dict,name = None): + super().__init__( + options = options, + dataType = 'ochl', + predictType = 'trend', + name = name + ) + + def getAns(self, data: pd.DataFrame()): + data=data.reset_index(drop=True) + ans={} + opMA={'dataType':'ohcl', + 'action':'findMean', + 'actionOptions':{ + 'MeanType':self.options['MeanType'], + 'valueType':self.options['valueType'], + 'window':self.options['window'] + } + } + ans['BB']=CoreTraidMath.CoreMath(data,opMA).ans + opSTD={'dataType':'ohcl', + 'action':'findSTD', + 'actionOptions':{'valueType':self.options['valueType'],'window':self.options['window']} + } + ans['STD']=CoreTraidMath.CoreMath(data,opSTD).ans + ans['pSTD']=ans['BB']+ans['STD']*self.options['kDev'] + ans['mSTD']=ans['BB']-ans['STD']*self.options['kDev'] + ans['x']=np.array(data['date'][self.options['window']-1:].to_list()) + self.ans= ans + return ans + \ No newline at end of file diff --git a/market_trade/core/riskManager.py b/market_trade/core/riskManager.py new file mode 100644 index 0000000..9042581 --- /dev/null +++ 
b/market_trade/core/riskManager.py @@ -0,0 +1,29 @@ +import pandas as pd +import datetime +import numpy as np +import random + +class riskManager: + + def __init__(self,commision=0.04): + self.commision = commision + pass + def getDecision(self,probabilityDecision, price, deals=None) -> dict: + ans = {} + ans['decision'] = 'none' + if probabilityDecision['trande'] == 'up': + ans['decision'] = 'buy' + ans['amount'] = 1 + elif probabilityDecision['trande'] == 'none': + ans['decision'] = 'none' + elif probabilityDecision['trande'] == 'down': + for i in range(deals.shape[0]): + ans['decision'] = 'None' + ans['deals'] = [] + row = deals.iloc[i] + if row.startPrice < price*pow(1+self.commission,2): + ans['decision'] = 'sell' + ans['deals'].append(row.name) + return ans + + diff --git a/market_trade/core/signals.py b/market_trade/core/signals.py index c0af03b..e33db24 100644 --- a/market_trade/core/signals.py +++ b/market_trade/core/signals.py @@ -2,12 +2,11 @@ import pandas as pd import datetime import numpy as np -import market_trade.core.CoreTraidMath as CoreTraidMath -import market_trade.core.CoreDraw as CoreDraw +import CoreTraidMath +import CoreDraw from tqdm import tqdm -from market_trade.core.indicators import * - +from indicators import * class coreSignalTrande(): def __init__(self, @@ -17,140 +16,133 @@ class coreSignalTrande(): batchSize=None, indParams=None, signalParams=None, - # needFig=False, - # showOnlyIndex=False, - # drawFig=False, - # equalityGap=0 - ): - - self.data = data.reset_index(drop=True) - self.onlineData = data.reset_index(drop=True) - self.dataType = dataType - self.mode = mode - self.ans = None - self.softAnalizList = np.asarray([]) - self.hardAnalizList = np.asarray([]) - self.analizMetrics = {} - self.indParams = indParams - self.signalParams = signalParams - self.batchSize = batchSize - # self.needFig=needFig - # self.showOnlyIndex=showOnlyIndex - # self.drawFig=drawFig - # self.equalityGap=equalityGap - - # Роутер получения ответа - def getAns(self, data): - # ans='Error: unknown Mode!' - ans = None + #needFig=False, + #showOnlyIndex=False, + #drawFig=False, + #equalityGap=0 + ): + + self.data=data.reset_index(drop=True) + self.onlineData=data.reset_index(drop=True) + self.dataType=dataType + self.mode=mode + self.ans=None + self.softAnalizList=np.asarray([]) + self.hardAnalizList=np.asarray([]) + self.analizMetrics={} + self.indParams=indParams + self.signalParams=signalParams + self.batchSize=batchSize + #self.needFig=needFig + #self.showOnlyIndex=showOnlyIndex + #self.drawFig=drawFig + #self.equalityGap=equalityGap + #Роутер получения ответа + def getAns(self,data): + #ans='Error: unknown Mode!' 
+ ans=None print("Start processing...") if self.mode == 'online': - ans = self.getOnlineAns(data.reset_index(drop=True)) + ans=self.getOnlineAns(data.reset_index(drop=True)) elif self.mode == 'retro': - ans = self.getRetroAns(data) + ans=self.getRetroAns(data) elif self.mode == 'retroFast': - ans = self.getRetroFastAns(data) + ans=self.getRetroFastAns(data) print("Processing DONE!") return ans - - # Ретро режим, где расширяется окно добавлением новых элементов - def getRetroAns(self, data): - ans = np.asarray([]) - for i in tqdm(range(self.batchSize, len(data) - 1)): - # self.onlineData=self.data[0:i] + #Ретро режим, где расширяется окно добавлением новых элементов + def getRetroAns(self,data): + ans=np.asarray([]) + for i in tqdm(range(self.batchSize,len(data)-1)): + #self.onlineData=self.data[0:i] window_data = data[0:i] window_data.reset_index(drop=True) - ans = np.append(ans, (self.getOnlineAns(window_data))) - self.ans = ans + ans=np.append(ans,(self.getOnlineAns(window_data))) + self.ans=ans self.getAnaliz() self.getMetrix() return ans - - # Ретро режим, где двигается окно - def getRetroFastAns(self, data): - # print('d - ',data) - ans = np.asarray([]) - for i in tqdm(range(len(data) - 1 - self.batchSize)): - # self.onlineData=self.data[i:i+self.batchSize] - window_data = data[i:i + self.batchSize] - # print('win - ',window_data) + #Ретро режим, где двигается окно + def getRetroFastAns(self,data): + #print('d - ',data) + ans=np.asarray([]) + for i in tqdm(range(len(data)-1-self.batchSize)): + #self.onlineData=self.data[i:i+self.batchSize] + window_data = data[i:i+self.batchSize] + #print('win - ',window_data) window_data.reset_index(drop=True) - # print('win - ',window_data) - ans = np.append(ans, (self.getOnlineAns(window_data))) - self.ans = ans + #print('win - ',window_data) + ans=np.append(ans,(self.getOnlineAns(window_data))) + self.ans=ans self.getAnaliz() self.getMetrix() return ans - - # Метод, который будет переопределять каждый дочерний класс + #Метод, который будет переопределять каждый дочерний класс def getOnlineAns(self): return 'Error' - def getAnaliz(self): print("Start analiz...") for i in (range(len(self.ans))): - sourceValue = self.data[self.signalParams['source']][i + self.batchSize] - targetValue = self.data[self.signalParams['target']][i + self.batchSize + 1] - if (targetValue) > sourceValue: - if self.ans[i] == 1: - self.softAnalizList = np.append(self.softAnalizList, 1) - self.hardAnalizList = np.append(self.hardAnalizList, 1) - elif self.ans[i] == -1: - self.softAnalizList = np.append(self.softAnalizList, -1) - self.hardAnalizList = np.append(self.hardAnalizList, -1) + sourceValue=self.data[self.signalParams['source']][i+self.batchSize] + targetValue=self.data[self.signalParams['target']][i+self.batchSize + 1] + if (targetValue)>sourceValue: + if self.ans[i]==1: + self.softAnalizList=np.append(self.softAnalizList,1) + self.hardAnalizList=np.append(self.hardAnalizList,1) + elif self.ans[i]==-1: + self.softAnalizList=np.append(self.softAnalizList,-1) + self.hardAnalizList=np.append(self.hardAnalizList,-1) else: - self.softAnalizList = np.append(self.softAnalizList, 0) - self.hardAnalizList = np.append(self.hardAnalizList, -1) - - elif (targetValue) < sourceValue: - if self.ans[i] == 1: - self.softAnalizList = np.append(self.softAnalizList, -1) - self.hardAnalizList = np.append(self.hardAnalizList, -1) - elif self.ans[i] == -1: - self.softAnalizList = np.append(self.softAnalizList, 1) - self.hardAnalizList = np.append(self.hardAnalizList, 1) + 
self.softAnalizList=np.append(self.softAnalizList,0) + self.hardAnalizList=np.append(self.hardAnalizList,-1) + + elif (targetValue) self.BB.ans['pSTD'][-1]: - ans = -1 - elif lastValue < self.BB.ans['mSTD'][-1]: - ans = +1 + #print(BB) + lastValue=data[self.signalParams['source']].to_list()[-1] + if lastValue>self.BB.ans['pSTD'][-1]: + ans=-1 + elif lastValue dict: + return indicatorsAgrigator(req['indicators']) + + def getIndAns(self, dataDict: dict) -> dict: + return self.agrigateInds.getAns(dataDict) + + def getAns(self, data: pd.DataFrame(), indDataDict: dict) -> dict: + return self.getSigAns(data, self.getIndAns(indDataDict)) + + + +class sig_BB(coreSignalTrande): + """ + ind keys: + ind_BB + """ + + def __init__(self, name: str, req:dict): + super().__init__(name, req, 'ochl') + + def getSigAns(self, data: pd.DataFrame(), indAnsDict: dict) -> dict: + + lastValue = data[self.params['source']].to_list()[-1] + if lastValue>indAnsDict['ind_BB']['pSTD'][-1]: + ans='down' + elif lastValue dict: + ans = {} + for i in siganlsDict.keys(): + ans[i]=siganlsDict[i]['className'](name = i, req = siganlsDict[i]) + return ans + + def getAns(self, dataDict: dict) -> dict: + ans = {} + for i in dataDict.keys(): + ans[i] = self.signals[i].getAns(data = dataDict[i]['signalData'], + indDataDict = dataDict[i]['indicatorData']) + return ans \ No newline at end of file diff --git a/market_trade/core/trandeVoter.py b/market_trade/core/trandeVoter.py new file mode 100644 index 0000000..da23070 --- /dev/null +++ b/market_trade/core/trandeVoter.py @@ -0,0 +1,83 @@ +import pandas as pd +import datetime +import numpy as np +#import random + +class trandeVoter(): + + def __init__(self,name): + + self.name = name # просто имя + self.trandeValuesList = ['up','none','down'] #словарь трегдов + self.matrixAmounts = None # матрица сумм + self.keysMatrixAmounts = None #ключи матрицы сумм, техническое поле + self.matrixProbability = None # матрица вероятностей + + + #функция которая создает df с заданным набором колонок и индексов. индексы - уникальные соотношения + def createDFbyNames(self, namesIndex, namesColoms,defaultValue=0.0): + df = pd.DataFrame(dict.fromkeys(namesColoms, [defaultValue]*pow(3,len(namesIndex))), + index=pd.MultiIndex.from_product([self.trandeValuesList]*len(namesIndex), names=namesIndex) + #,columns=namesColoms + ) + return(df) + + #создание матрицы сумм с дефолтным значением + def createMatrixAmounts(self,namesIndex: list) -> pd.DataFrame(): + self.matrixAmounts = self.createDFbyNames(namesIndex,self.trandeValuesList,0) + self.keysMatrixAmounts = self.matrixAmounts.to_dict('tight')['index_names'] + self.createMatrixProbability(namesIndex) + return(self.matrixAmounts) + + #создание матрицы вероятностей с дефолтным значением + def createMatrixProbability(self,namesIndex: list) -> pd.DataFrame(): + self.matrixProbability = self.createDFbyNames(namesIndex,self.trandeValuesList) + return(self.matrixProbability) + + #установка значений в матрицы сумм. 
signalDecisions - значения индикаторов key:value; trande - реальное значение + def setDecisionBySignals(self,signalDecisions: dict,trande: str) -> None: + buff=[] + for i in self.keysMatrixAmounts: + buff.append(signalDecisions[i]) + self.matrixAmounts.loc[tuple(buff),trande] += 1 + + #заполнение матрицы вероятностей вычисляемыми значениями из матрицы сумм + def generateMatrixProbability(self) -> None: + for i in range(self.matrixAmounts.shape[0]): + rowSum=sum(self.matrixAmounts.iloc[i]) + self.matrixProbability.iloc[i]['up'] = (self.matrixAmounts.iloc[i]['up'] / rowSum) + self.matrixProbability.iloc[i]['none'] = self.matrixAmounts.iloc[i]['none'] / rowSum + self.matrixProbability.iloc[i]['down'] = self.matrixAmounts.iloc[i]['down'] / rowSum + + #получение рещения из матрицы вероятностей по заданным значениям сигналов + def getDecisionBySignals(self,signalDecisions: dict) -> dict: + ans = {} + spliceSearch =self.matrixProbability.xs(tuple(signalDecisions.values()), + level=list(signalDecisions.keys()) + ) + ans['probability'] = spliceSearch.to_dict('records')[0] + ans['trande'] = spliceSearch.iloc[0].idxmax() + return ans + + #получение матриц вероятностей и суммы в видей словарей + def getMatrixDict(self) -> dict: + ans={} + ans['amounts'] = self.matrixAmounts.to_dict('tight') + ans['probability'] = self.matrixProbability.to_dict('tight') + return ans + + #установка матриц вероятностей и суммы в видей словарей + def setMatrixDict(self,matrixDict: dict) -> dict: + if matrixDict['amounts'] != None: + self.matrixAmounts = pd.DataFrame.from_dict(y['amounts'], orient='tight') + if matrixDict['probability'] != None: + self.matrixProbability = pd.DataFrame.from_dict(y['probability'], orient='tight') + + + + + + + + + \ No newline at end of file diff --git a/market_trade/tests/test.py b/market_trade/tests/test.py index e2aa3fc..34a65ee 100644 --- a/market_trade/tests/test.py +++ b/market_trade/tests/test.py @@ -4,7 +4,7 @@ import pandas as pd if __name__ == '__main__': - df_candle = pd.read_csv(market_trade.src.constants.TEST_CANDLESTICKS_PATH) + df_candle = pd.read_csv(market_trade.constants.TEST_CANDLESTICKS_PATH) df_candle.rename(columns={'timestamp': 'date'}, inplace=True) ind_params = {'MeanType': 'SMA', 'window': 15, 'valueType': 'close', 'kDev': 2.5} signalParams = {'source': 'close', 'target': 'close'} diff --git a/market_trade/tests/test_dataloader.py b/market_trade/tests/test_dataloader.py index 5b26c2a..2624036 100644 --- a/market_trade/tests/test_dataloader.py +++ b/market_trade/tests/test_dataloader.py @@ -3,11 +3,11 @@ import market_trade.constants def test_dataloader(data_path): - duka_interface = (market_trade.src.dataloader.DukaMTInterface(data_path)) + duka_interface = (market_trade.data.dataloader.DukaMTInterface(data_path)) print(duka_interface.ask_candlesticks) if __name__ == '__main__': - candlesticks_filepaths = [filepath for filepath in market_trade.src.constants.CANDLESTICK_DATASETS_PATH.iterdir()] + candlesticks_filepaths = [filepath for filepath in market_trade.constants.CANDLESTICK_DATASETS_PATH.iterdir()] candlesticks_filepath = candlesticks_filepaths[0] test_dataloader(candlesticks_filepath) \ No newline at end of file diff --git a/market_trade/tests/test_decision.py b/market_trade/tests/test_decision.py new file mode 100644 index 0000000..5697a7d --- /dev/null +++ b/market_trade/tests/test_decision.py @@ -0,0 +1 @@ +im \ No newline at end of file diff --git a/notebooks/autogen/Indicators.py b/notebooks/autogen/Indicators.py new file mode 100644 index 
0000000..f519b11 --- /dev/null +++ b/notebooks/autogen/Indicators.py @@ -0,0 +1,267 @@ +#!/usr/bin/env python +# coding: utf-8 + +# In[2]: + + +import pandas as pd +import datetime +import numpy as np +#import plotly as pl + +#import plotly.graph_objs as go +#from plotly.offline import init_notebook_mode, iplot +#from plotly.subplots import make_subplots +#init_notebook_mode() + +import CoreTraidMath +import CoreDraw + + +# In[3]: + + +class coreIndicator(): + def __init__(self, + data=pd.DataFrame(), + options={}, + showMode='None', + ): + ''' + showMode = None/Ind/PartOf + ''' + self.data=data + self.showMode=showMode + self.options=options + self.overlayInd=None #True/False + self.ans=None + self.figDict=None + + def getAns(self,data=None): + if type(data)!=type(None): + self.data=data + self.ans=self.getCalculate() + if self.showMode=='Ind' or self.showMode=='PartOf': + self.figDict=self.getFigDict() + if self.showMode=='Ind': + self.getFig() + return self.ans + def getFig(self,row=1): + CoreDraw.coreDraw(self.figDict,True) + def getCalculate(self): + return "Error" + def getFigDict(self): + return "Error" + +class indicatorAgrigator(): + ''' + Тема чисто для отладки + jj=indicatorAgrigator().runAll([o1,o2],df_candle[:30]) + #jj.createIndFromList([o1,o2]) + #jj.calculateInd(df_candle[:30]) + + ''' + def __init__(self): + self.indList=None + self.data=None + def createInd(self,classDict): + return classDict['name']( + options=classDict['params'], + showMode=classDict['showMode'] + ) + + + def createIndFromList(self,indList): + self.indList=indList + ans=[] + for i in self.indList: + ans.append(self.createInd(i)) + self.indList=ans + return ans + + def calculateInd(self,data): + self.data=data + for i in self.indList: + #i.getAns(data) + i.data=self.data + i.ans=i.getCalculate() + i.figDict=i.getFigDict() + #i.getFig() + def agrigateFig(self): + req=[[]] + + for i in self.indList: + if i.overlayInd==True: + req[0].append(i) + else: + req.append([i]) + CoreDraw.agrigateFig(req,True) + def runAll(self,indList,df,needDraw=False): + self.createIndFromList(indList) + self.calculateInd(df) + if needDraw: + self.agrigateFig() + + + + + +# In[4]: + + +class ind_BB(coreIndicator): + + def getCalculate(self): + self.overlayInd=True + ans={} + opMA={'dataType':'ohcl', + 'action':'findMean', + 'actionOptions':{ + 'MeanType':self.options['MeanType'], + 'valueType':self.options['valueType'], + 'window':self.options['window'] + } + } + ans['BB']=CoreTraidMath.CoreMath(self.data,opMA).ans + opSTD={'dataType':'ohcl', + 'action':'findSTD', + 'actionOptions':{'valueType':self.options['valueType'],'window':self.options['window']} + } + ans['STD']=CoreTraidMath.CoreMath(self.data,opSTD).ans + ans['pSTD']=ans['BB']+ans['STD']*self.options['kDev'] + ans['mSTD']=ans['BB']-ans['STD']*self.options['kDev'] + ans['x']=np.array(self.data['date'][self.options['window']-1:].to_list()) + return ans + def getFigDict(self,row=1): + req=[] + + req.append({ + 'vtype':'Scatter', + 'df':pd.DataFrame( + {'value':self.ans['BB'],'date':self.ans['x']}) , + 'row':row, + 'col':1, + 'name':'BB' + + }) + req.append({ + 'vtype':'Scatter', + 'df':pd.DataFrame( + {'value':self.ans['pSTD'],'date':self.ans['x']}) , + 'row':row, + 'col':1, + 'name':'pSTD' + + }) + req.append({ + 'vtype':'Scatter', + 'df':pd.DataFrame( + {'value':self.ans['mSTD'],'date':self.ans['x']}) , + 'row':row, + 'col':1, + 'name':'mSTD' + + }) + + return req + + +# In[5]: + + +class ind_OCHL(coreIndicator): + def getCalculate(self): + self.overlayInd=True + def 
getFigDict(self,row=1): + req=[] + + req.append({ + 'vtype':'OCHL', + 'df':self.data, + 'row':1, + 'col':1, + 'name':'OHCL' + + }) + return req + + +# In[7]: + + +df_candle = pd.read_csv("../data/EURUSD_price_candlestick.csv") +df_candle.rename(columns={'timestamp': 'date'}, inplace=True) +df_candle + + +# In[8]: + + +o1={ + 'name':ind_OCHL, + 'params':{}, + 'showMode':'PartOf', +} +o2={ + 'name':ind_BB, + 'params':{'MeanType':'SMA','window':25,'valueType':'low','kDev':2}, + 'showMode':'PartOf', +} +jj=indicatorAgrigator().runAll([o1,o2],df_candle[:300],True) +#jj.createIndFromList([o1,o2]) +#jj.calculateInd(df_candle[:30]) + + +# In[9]: + + +op={'MeanType':'SMA','window':5,'valueType':'low','kDev':2} +a=ind_BB(df_candle[:100],op,'PartOf') + + +# In[10]: + + +a.getAns() + + +# In[11]: + + +b=ind_OCHL(df_candle[:30],{},'Ind') +b.getAns(df_candle[:100]) + + +# In[12]: + + +opc={'MeanType':'SMA','window':20,'valueType':'low','kDev':2} +c=ind_BB(df_candle[:100],opc,'PartOf') +c.getAns() + + +# In[13]: + + +hhh = CoreDraw.agrigateFig([[b,a,c]],True) + + +# In[14]: + + +import indicators + + +# In[15]: + + +op_1={'MeanType':'SMA','window':5,'valueType':'low','kDev':2} +test_1=indicators.ind_BB(df_candle[:100],op) +test_1.getAns() + + +# In[ ]: + + + + diff --git a/notebooks/autogen/RiskManager.py b/notebooks/autogen/RiskManager.py new file mode 100644 index 0000000..d25f4b6 --- /dev/null +++ b/notebooks/autogen/RiskManager.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python +# coding: utf-8 + +# In[1]: + + +import pandas as pd +import datetime +import numpy as np +import random + + +# In[2]: + + +class riskManager: + + def __init__(self,commision=0.04): + self.commision = commision + pass + def getDecision(self,signalDecision,probabilityDecision, price, deals=None) -> dict: + ans = {} + if probabilityDecision['trande'] == 'up': + ans['decision'] = 'buy' + ans['amount'] = 1 + elif probabilityDecision['trande'] == 'none': + ans['decision'] = 'none' + elif probabilityDecision['trande'] == 'down': + for i in deals.shape[0]: + ans['decision'] = 'None' + ans['deals'] = [] + row = deals.iloc[i] + if row.startPrice < price*pow(1+self.commission,2): + ans['decision'] = 'sell' + ans['deals'].append(row.name) + return ans + + + + +# In[ ]: + + + + diff --git a/notebooks/autogen/Signals.py b/notebooks/autogen/Signals.py new file mode 100644 index 0000000..07baaa7 --- /dev/null +++ b/notebooks/autogen/Signals.py @@ -0,0 +1,390 @@ +#!/usr/bin/env python +# coding: utf-8 + +# In[1]: + + +import pandas as pd +import datetime +import numpy as np + +import CoreTraidMath +import CoreDraw +from tqdm import tqdm + +from indicators import * + + +# In[2]: + + +df_candle = pd.read_csv("../data/EURUSD_price_candlestick.csv") +df_candle.rename(columns={'timestamp': 'date'}, inplace=True) +df_candle + + +# In[3]: + + +class coreSignalTrande(): + def __init__(self, + data=pd.DataFrame(), + dataType='candel', + mode='online', + batchSize=None, + indParams=None, + signalParams=None, + #needFig=False, + #showOnlyIndex=False, + #drawFig=False, + #equalityGap=0 + ): + + self.data=data.reset_index(drop=True) + self.onlineData=data.reset_index(drop=True) + self.dataType=dataType + self.mode=mode + self.ans=None + self.softAnalizList=np.asarray([]) + self.hardAnalizList=np.asarray([]) + self.analizMetrics={} + self.indParams=indParams + self.signalParams=signalParams + self.batchSize=batchSize + #self.needFig=needFig + #self.showOnlyIndex=showOnlyIndex + #self.drawFig=drawFig + #self.equalityGap=equalityGap + #Роутер получения ответа + def 
getAns(self,data): + #ans='Error: unknown Mode!' + ans=None + print("Start processing...") + if self.mode == 'online': + ans=self.getOnlineAns(data.reset_index(drop=True)) + elif self.mode == 'retro': + ans=self.getRetroAns(data) + elif self.mode == 'retroFast': + ans=self.getRetroFastAns(data) + print("Processing DONE!") + return ans + #Ретро режим, где расширяется окно добавлением новых элементов + def getRetroAns(self,data): + ans=np.asarray([]) + for i in tqdm(range(self.batchSize,len(data)-1)): + #self.onlineData=self.data[0:i] + window_data = data[0:i] + window_data.reset_index(drop=True) + ans=np.append(ans,(self.getOnlineAns(window_data))) + self.ans=ans + self.getAnaliz() + self.getMetrix() + return ans + #Ретро режим, где двигается окно + def getRetroFastAns(self,data): + #print('d - ',data) + ans=np.asarray([]) + for i in tqdm(range(len(data)-1-self.batchSize)): + #self.onlineData=self.data[i:i+self.batchSize] + window_data = data[i:i+self.batchSize] + #print('win - ',window_data) + window_data.reset_index(drop=True) + #print('win - ',window_data) + ans=np.append(ans,(self.getOnlineAns(window_data))) + self.ans=ans + self.getAnaliz() + self.getMetrix() + return ans + #Метод, который будет переопределять каждый дочерний класс + def getOnlineAns(self): + return 'Error' + def getAnaliz(self): + print("Start analiz...") + for i in (range(len(self.ans))): + sourceValue=self.data[self.signalParams['source']][i+self.batchSize] + targetValue=self.data[self.signalParams['target']][i+self.batchSize + 1] + if (targetValue)>sourceValue: + if self.ans[i]==1: + self.softAnalizList=np.append(self.softAnalizList,1) + self.hardAnalizList=np.append(self.hardAnalizList,1) + elif self.ans[i]==-1: + self.softAnalizList=np.append(self.softAnalizList,-1) + self.hardAnalizList=np.append(self.hardAnalizList,-1) + else: + self.softAnalizList=np.append(self.softAnalizList,0) + self.hardAnalizList=np.append(self.hardAnalizList,-1) + + elif (targetValue)self.BB.ans['pSTD'][-1]: + ans=-1 + elif lastValue dict: + return indicatorsAgrigator(req['indicators']) + + def getIndAns(self, dataDict: dict) -> dict: + return self.agrigateInds.getAns(dataDict) + + def getAns(self, data: pd.DataFrame(), indDataDict: dict) -> dict: + return self.getSigAns(data, self.getIndAns(indDataDict)) + + + +class sig_BB(coreSignalTrande): + """ + ind keys: + ind_BB + """ + + def __init__(self, name: str, req:dict): + super().__init__(name, req, 'ochl') + + def getSigAns(self, data: pd.DataFrame(), indAnsDict: dict) -> dict: + + lastValue = data[self.params['source']].to_list()[-1] + if lastValue>indAnsDict['ind_BB']['pSTD'][-1]: + ans='down' + elif lastValue dict: + ans = {} + for i in siganlsDict.keys(): + ans[i]=siganlsDict[i]['className'](name = i, req = siganlsDict[i]) + return ans + + def getAns(self, dataDict: dict) -> dict: + ans = {} + for i in dataDict.keys(): + ans[i] = self.signals[i].getAns(data = dataDict[i]['signalData'], + indDataDict = dataDict[i]['indicatorData']) + return ans + + +# In[ ]: + + + + + +# In[6]: + + +sigreq= { + 'params':{'source':'close','target':'close'}, + 'indicators':{ + 'ind_BB':{ + 'className':ind_BB, + 'params':{'MeanType':'SMA','window':15,'valueType':'close','kDev':2.5} + } + } + } + +indReqDict ={'ind_BB':df_candle[:1000]} + + +# In[7]: + + +sigAgrReq = { + 'sig_BB':{ + 'className':sig_BB, + 'params':{'source':'close','target':'close'}, + 'indicators':{ + 'ind_BB':{ + 'className':ind_BB, + 'params':{'MeanType':'SMA','window':15,'valueType':'close','kDev':2.5} + } + } + }, + 'sig_BB_2':{ + 
'className':sig_BB, + 'params':{'source':'close','target':'close'}, + 'indicators':{ + 'ind_BB':{ + 'className':ind_BB, + 'params':{'MeanType':'SMA','window':30,'valueType':'close','kDev':2} + } + } + } +} + +sigAgrData = { + 'sig_BB':{ + 'signalData': df_candle[990:1000], + 'indicatorData' :{'ind_BB': df_candle[:1000]} + }, + 'sig_BB_2':{ + 'signalData': df_candle[990:1000], + 'indicatorData' :{'ind_BB': df_candle[:1000]} + } +} + + +# In[ ]: + + + + + +# In[8]: + + +ttt=signalsAgrigator(sigAgrReq) + + +# In[9]: + + +ttt.__dict__ + + +# In[10]: + + +ttt.signals['sig_BB'].__dict__ + + +# In[11]: + + +ttt.getAns(sigAgrData) + + +# In[ ]: + + + + + +# In[ ]: + + + + + +# In[12]: + + +list({'ttt':2}.keys())[0] + + +# In[13]: + + +test = sig_BB('sig_BB', sigreq) + + +# In[14]: + + +test.__dict__ + + +# In[ ]: + + + + + +# In[15]: + + +test.agrigateInds.__dict__ + + +# In[16]: + + +ians = test.getIndAns(indReqDict) +ians + + +# In[17]: + + +test.getAns(df_candle[:100],indReqDict) + + +# In[ ]: + + + + + +# In[ ]: + + + + diff --git a/notebooks/autogen/TrandVoter.py b/notebooks/autogen/TrandVoter.py new file mode 100644 index 0000000..10c82f3 --- /dev/null +++ b/notebooks/autogen/TrandVoter.py @@ -0,0 +1,360 @@ +#!/usr/bin/env python +# coding: utf-8 + +# In[1]: + + +import pandas as pd +import datetime +import numpy as np +import random +from signals import * #потом удалить + + +# In[2]: + + +class trandeVoter(): + + def __init__(self,name): + + self.name = name # просто имя + self.trandeValuesList = ['up','none','down'] #словарь трегдов + self.matrixAmounts = None # матрица сумм + self.keysMatrixAmounts = None #ключи матрицы сумм, техническое поле + self.matrixProbability = None # матрица вероятностей + + + #функция которая создает df с заданным набором колонок и индексов. индексы - уникальные соотношения + def createDFbyNames(self, namesIndex, namesColoms,defaultValue=0.0): + df = pd.DataFrame(dict.fromkeys(namesColoms, [defaultValue]*pow(3,len(namesIndex))), + index=pd.MultiIndex.from_product([self.trandeValuesList]*len(namesIndex), names=namesIndex) + #,columns=namesColoms + ) + return(df) + + #создание матрицы сумм с дефолтным значением + def createMatrixAmounts(self,namesIndex: list) -> pd.DataFrame(): + self.matrixAmounts = self.createDFbyNames(namesIndex,self.trandeValuesList,0) + self.keysMatrixAmounts = self.matrixAmounts.to_dict('tight')['index_names'] + self.createMatrixProbability(namesIndex) + return(self.matrixAmounts) + + #создание матрицы вероятностей с дефолтным значением + def createMatrixProbability(self,namesIndex: list) -> pd.DataFrame(): + self.matrixProbability = self.createDFbyNames(namesIndex,self.trandeValuesList) + return(self.matrixProbability) + + #установка значений в матрицы сумм. 
signalDecisions - значения индикаторов key:value; trande - реальное значение + def setDecisionBySignals(self,signalDecisions: dict,trande: str) -> None: + buff=[] + for i in self.keysMatrixAmounts: + buff.append(signalDecisions[i]) + self.matrixAmounts.loc[tuple(buff),trande] += 1 + + #заполнение матрицы вероятностей вычисляемыми значениями из матрицы сумм + def generateMatrixProbability(self) -> None: + for i in range(self.matrixAmounts.shape[0]): + rowSum=sum(self.matrixAmounts.iloc[i]) + self.matrixProbability.iloc[i]['up'] = (self.matrixAmounts.iloc[i]['up'] / rowSum) + self.matrixProbability.iloc[i]['none'] = self.matrixAmounts.iloc[i]['none'] / rowSum + self.matrixProbability.iloc[i]['down'] = self.matrixAmounts.iloc[i]['down'] / rowSum + + #получение рещения из матрицы вероятностей по заданным значениям сигналов + def getDecisionBySignals(self,signalDecisions: dict) -> dict: + ans = {} + spliceSearch =self.matrixProbability.xs(tuple(signalDecisions.values()), + level=list(signalDecisions.keys()) + ) + ans['probability'] = spliceSearch.to_dict('records')[0] + ans['trande'] = spliceSearch.iloc[0].idxmax() + return ans + + #получение матриц вероятностей и суммы в видей словарей + def getMatrixDict(self) -> dict: + ans={} + ans['amounts'] = self.matrixAmounts.to_dict('tight') + ans['probability'] = self.matrixProbability.to_dict('tight') + return ans + + #установка матриц вероятностей и суммы в видей словарей + def setMatrixDict(self,matrixDict: dict) -> dict: + if matrixDict['amounts'] != None: + self.matrixAmounts = pd.DataFrame.from_dict(y['amounts'], orient='tight') + if matrixDict['probability'] != None: + self.matrixProbability = pd.DataFrame.from_dict(y['probability'], orient='tight') + + +# In[3]: + + +reqSig={ + 'BB1':{ + 'className':signal_BB, + 'indParams':{'MeanType':'SMA','window':15,'valueType':'close','kDev':2.5}, + 'signalParams':{'source':'close','target':'close'}, + 'batchSize':15 + }, + 'BB2':{ + 'className':signal_BB, + 'indParams':{'MeanType':'SMA','window':15,'valueType':'close','kDev':2.5}, + 'signalParams':{'source':'close','target':'close'}, + 'batchSize':20 + } +} + + + +# In[4]: + + +reqDS={'BB1':'up','BB2':'none'} + + +# In[7]: + + +reqCreate=list(reqSig.keys()) +reqCreate + + +# In[8]: + + +t=trandeVoter('piu') +o=t.createMatrixAmounts(['BB1', 'BB2']) +o + + +# In[9]: + + +for i in range(100000): + t.setDecisionBySignals({'BB1':random.choice(['up','down','none']), + 'BB2':random.choice(['up','down','none'])}, + random.choice(['up','down','none'])) + + + +# In[10]: + + +t.matrixAmounts + + +# In[11]: + + +t.generateMatrixProbability() + + +# In[577]: + + +t.matrixProbability + + +# In[14]: + + +t.setMatrixDict(y) + + +# In[15]: + + +t.getDecisionBySignals(reqDS) + + +# In[ ]: + + + + + +# In[ ]: + + + + + +# In[13]: + + +y = t.getMatrixDict() +y + + +# In[16]: + + +ddf = pd.DataFrame.from_dict(y['amounts'], orient='tight') +ddf + + +# In[ ]: + + + + + +# In[ ]: + + + + + +# In[ ]: + + + + + +# In[ ]: + + + + + +# In[17]: + + +t.matrixProbability.iloc[0]['up'] = (t.matrixProbability.iloc[0]['up'] / (sum(t.matrixProbability.iloc[0]))) +t.matrixProbability + + +# In[ ]: + + + + + +# In[ ]: + + + + + +# In[ ]: + + + + + +# In[18]: + + +t.matrixProbability['trandе'] + + +# In[19]: + + +random.choice(['up','down','none']) + + +# In[20]: + + +t.setDecisionBySignals(reqDS,'up') + + +# In[21]: + + +#t.matrixAmounts.at(bbb,'up') + +t.matrixAmounts.iloc[0] + + +# In[22]: + + +for i in t.matrixAmounts.iloc[0]: + print (i) + + +# In[23]: + + 
+(t.matrixAmounts.iloc[0]).idxmax() + + +# In[24]: + + +t.matrixAmounts + + +# In[ ]: + + + + + +# In[25]: + + +o.xs(('up','down'), level=['BB1','BB2'])['up'].iloc[0] + +#oldValue = o.xs(('up','down'), level=['BB1','BB2'])['up'] + +#o=o.replace(oldValue,oldValue.iloc[0]+1) +#o.xs(('up','down'), level=['BB1','BB2']) + + +# In[26]: + + +o.xs(('up','down'), level=['BB1','BB2'], drop_level=False)#.iloc[0].loc['up']=2#.at['up']=4 + + +# In[27]: + + +o.xs(('up','down'), level=['BB1','BB2']).iloc[0].at['up'] + + +# In[28]: + + +o.loc['up'].loc['down'] + + +# In[29]: + + +bbb=tuple(['up','down']) +bbb + + +# In[30]: + + +o.loc[bbb,] + + +# In[31]: + + +o.at[bbb, 'up']+=1 +o + + +# In[32]: + + +o.loc[bbb] + + +# In[33]: + + +dict(zip(['a','b','c'], [1,2,3])) + + +# In[ ]: + + + + diff --git a/notebooks/autogen/Voter_ne_tot.py b/notebooks/autogen/Voter_ne_tot.py new file mode 100644 index 0000000..5d43c6f --- /dev/null +++ b/notebooks/autogen/Voter_ne_tot.py @@ -0,0 +1,427 @@ +#!/usr/bin/env python +# coding: utf-8 + +# In[1]: + + +import pandas as pd +import datetime +import numpy as np + +from signals import * #потом удалить + + +# In[2]: + + +class voter_v2(): + + def __init__(self,name): + self.name=name + pass + + def createPredictMatrixBySignals(self,signalsName): + pass + + +# In[ ]: + + + + + +# In[3]: + + +reqSig={ + 'BB1':{ + 'className':signal_BB, + 'indParams':{'MeanType':'SMA','window':15,'valueType':'close','kDev':2.5}, + 'signalParams':{'source':'close','target':'close'}, + 'batchSize':15 + }, + 'BB2':{ + 'className':signal_BB, + 'indParams':{'MeanType':'SMA','window':15,'valueType':'close','kDev':2.5}, + 'signalParams':{'source':'close','target':'close'}, + 'batchSize':20 + } +} + + +# In[4]: + + +reqCreate=reqSig.keys() +reqCreate + + +# In[5]: + + +class Voter(): + + def __init__ (self, name=''): + + self.name=name + self.mop={ + 'up': pd.DataFrame(), + 'down':pd.DataFrame(), + 'none':pd.DataFrame() + + } + self.value={} + self.decision='' + self.real_decision='' + self.keys=[] + self.slice_dict={} + + def addValue(self, dic_value): + self.value=dic_value + self.checkForNew() + self.setSlice() + self.getDecision() + + + + def checkForNew(self): + + if not (list(self.value.keys()) == self.keys): + self.createNewMop(list(self.value.keys())) + + + def createNewMop(self,missing_indicators): + print('reassembly mop') + new_columns= (missing_indicators) + + + #new_columns=new_columns.append(['value','p']) + + + + n=len(new_columns) + start_value=-1 + variator=3 + new_lst=[] + buf_lst=[] + for i in range(n): + buf_lst.append(start_value) + + + for i in range(pow(variator,n)): + new_lst.append(buf_lst.copy()) + + for j in range(n): + for i in range(len(new_lst)): + dob_iterator=(i // pow(variator,j)) % variator + new_lst[i][j]=new_lst[i][j] + dob_iterator + + + #print (new_columns) + self.keys=new_columns + new_columns = new_columns+['amount']+['percentage'] + + for i in new_lst: + i = i.extend([0,0]) + #i = i.append(0) + + #print(new_lst) + #print(new_columns) + new_df=pd.DataFrame(new_lst,columns=new_columns) + + self.mop['up']=pd.DataFrame.from_dict(new_df.to_dict()) + self.mop['down']=pd.DataFrame.from_dict(new_df.to_dict()) + self.mop['none']=pd.DataFrame.from_dict(new_df.to_dict()) + + + def setSlice(self): + + row_flg=True + self.slice_dict={} + for j in self.mop.keys(): + for index, row in self.mop[j].iterrows(): + for key, value in self.value.items(): + if value != row[key]: + #print('fasle ',key,value,row[key]) + row_flg=False + break + if row_flg: + self.slice_dict[j]=dict(row) 
+ #print(j,dict(row)) + row_flg=True + + def getDecision (self): + + max_value=0 + for key, value in self.slice_dict.items(): + if value['amount'] >= max_value: + max_value = value['amount'] + self.decision = key + return self.decision + + def setDecision (self,real_decision): + self.real_decision=real_decision + self.updMop() + self.slice_dict[real_decision]['amount']+=1 + + + def updMop(self): + + row_flg=True + for index, row in self.mop[self.real_decision].iterrows(): + for key, value in self.value.items(): + if value != row[key]: + row_flg=False + break + if row_flg: + #self.slice_dict[j]=dict(row) + row['amount']=row['amount']+1 + row_flg=True + + +# In[6]: + + +test_dic_value_1={'lupa':1 } +test_dic_value_2={'lupa':1 , 'pupa':1} +test_dic_value_3={'lupa':1 , 'pupa':1 , 'zalupa':1 , 'zapupa':1 } +test_dic_value_4={'lupa':1 , 'pupa':1 , 'zalupa':1 , 'zapupa':-1 } + + +# In[7]: + + +test=Voter('huita') +test.addValue(test_dic_value_2) +test.decision +test.getDecision() + + +# In[8]: + + +test.setDecision('down') +test.getDecision() + + +# In[9]: + + +test.slice_dict + + +# In[ ]: + + + + + +# In[10]: + + +import pickle + + +# In[11]: + + +dictionary_data = {"a": 1, "b": 2} + + +a_file = open("data.pkl", "wb") + +pickle.dump(dictionary_data, a_file) + +a_file.close() + + +a_file = open("data.pkl", "rb") + +output = pickle.load(a_file) + +print(output) + + + +a_file.close() + + +# In[ ]: + + + + + +# In[ ]: + + + + + +# In[12]: + + +arrays = [ + + ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"], + + ["one", "two", "one", "two", "one", "two", "one", "two"], + +] +tuples = list(zip(*arrays)) +tuples + + +# In[13]: + + +index = pd.MultiIndex.from_tuples(tuples, names=["first", "second"]) + + +# In[14]: + + +s = pd.DataFrame(np.random.randn(8), index=index) +s + + +# In[15]: + + +s.to_dict() + + +# In[16]: + + +s.loc(('bar', 'one')) + + +# In[18]: + + +iterables = [["up", "down", "none"], ["up", "down", "none"]] +df = pd.DataFrame({'col1': np.random.randn(9),'col2': np.random.randn(9)}, index=pd.MultiIndex.from_product(iterables, names=["first", "second"])) +df + + +# In[19]: + + +df.__dict__ + + +# In[ ]: + + + + + +# In[ ]: + + + + + +# In[20]: + + +def createDF(namesIndex, namesColoms): + trandeValuesList = ['up','none','down'] + colomsName_lvl = ['trande','amaunt','probability'] + #micolumns = pd.MultiIndex.from_tuples( + #[('amaunt', 'up'), ('amaunt', 'none'), ('amaunt', 'down'), ('trande',),('probability',)], names=["lvl0", "lvl1"] + #) + df = pd.DataFrame({ + 'trande': [None]*pow(3,len(namesIndex)), + 'amaunt': [None]*pow(3,len(namesIndex)), + 'probability': [None]*pow(3,len(namesIndex)) + }, + index=pd.MultiIndex.from_product([trandeValuesList]*len(namesIndex), names=namesIndex) + ,columns=namesColoms + ) + return(df) + + +# In[21]: + + +dd=createDF( ['1','2','3'],['trande','amaunt','probability'] ) +dd + + +# In[22]: + + +df.xs(('up','down'), level=['first','second']) + + +# In[23]: + + +dd['trande'] + + +# In[24]: + + +tvl = ['up','none','down'] +colomsName_lvl = ['trande','amaunt','probability'] + + +# In[25]: + + +tuplesCol = list(zip(['amaunt']*3,tvl)) +tuplesCol + + +# In[26]: + + +df.loc['up','down'] + + +# In[27]: + + +df.xs(('up','down'), level=['first','second']).iloc[0] + + +# In[28]: + + +df_d=df.to_dict('tight') +df_d + + +# In[29]: + + +df_d['index_names'] + + +# In[30]: + + +ddf = pd.DataFrame.from_dict(df_d, orient='tight') +ddf + + +# In[31]: + + +tuple([1,2,3]) + + +# In[32]: + + +ddf.xs(('up','down'), level=['first','second']).iloc[0] + + +# In[ ]: + + + + 
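
To make the voter logic above easier to follow, here is a minimal, hedged sketch of the idea `trandeVoter` implements: count how often each combination of signal votes preceded an 'up'/'none'/'down' move, row-normalise the counts into probabilities, and read off the most likely outcome for a given vote combination. The names and toy numbers below are illustrative only, not the class API; the class wraps the same steps in `createMatrixAmounts`, `setDecisionBySignals`, `generateMatrixProbability` and `getDecisionBySignals`.

```python
import pandas as pd

# Illustrative sketch of the trandeVoter counting/normalisation scheme.
trends = ['up', 'none', 'down']
signals = ['BB1', 'BB2']

# Counts matrix: one row per combination of signal votes, one column per observed outcome.
index = pd.MultiIndex.from_product([trends] * len(signals), names=signals)
amounts = pd.DataFrame(0.0, index=index, columns=trends)

# Record two toy observations: (signal votes) -> actual move on the next candle.
amounts.loc[('up', 'up'), 'up'] += 1
amounts.loc[('up', 'down'), 'none'] += 1

# Row-normalise the counts into probabilities; rows with no observations stay NaN.
probability = amounts.div(amounts.sum(axis=1), axis=0)

# Look up the most likely outcome for a concrete set of votes.
votes = {'BB1': 'up', 'BB2': 'up'}
row = probability.xs(tuple(votes.values()), level=list(votes.keys()))
print(row.iloc[0].idxmax())  # -> 'up' for the toy counts above
```

The MultiIndex row order plays the role of `keysMatrixAmounts` in the class: vote dictionaries have to be supplied with the same key order that was used when the matrix was created, otherwise the tuple lookup addresses the wrong row.
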
diff --git a/notebooks/autogen/coreDraw.py b/notebooks/autogen/coreDraw.py new file mode 100644 index 0000000..31aabbc --- /dev/null +++ b/notebooks/autogen/coreDraw.py @@ -0,0 +1,221 @@ +#!/usr/bin/env python +# coding: utf-8 + +# In[1]: + + +import pandas as pd +import datetime +import numpy as np +import plotly as pl +import plotly.graph_objs as go +import matplotlib.pyplot as plt +import random + + +import datetime +import matplotlib.dates as mdates +import matplotlib.pyplot as plt +import plotly +import plotly.graph_objs as go +from plotly.offline import init_notebook_mode, iplot +from plotly.subplots import make_subplots +init_notebook_mode() + +#import CoreTraidMath +import plotly.express as px + + + + + + + + + +# In[ ]: + + + + + +# In[2]: + + +class agrigateFig(): + + def __init__(self,data=[],needDraw=False ,subplot_titles=None): + self.data=data + self.ans=self.getAgrPlt() + if needDraw: + self.subplot_titles=subplot_titles + self.fig=coreDraw(self.ans,True,self.subplot_titles) + + + def getAgrPlt(self): + count=0 + ans=[] + for i in self.data: + count=count+1 + if type(i)==list: + for g in i: + for j in g.figDict: + ans.append(j) + ans[-1]['row']=count + else: + for j in i.figDict: + ans.append(j) + ans[-1]['row']=count + return ans + + +# In[3]: + + +class corePlt(): + def __init__(self, params={ + 'vtype':'', + 'df':pd.DataFrame(), + 'row':1, + 'col':1, + 'name':'' + }): + self.vtype=params['vtype'] + self.df=params['df'] + self.row=params['row'] + self.col=params['col'] + self.name=params['name'] + if 'colorType' in params.keys(): + self.colorType=params['colorType'] + + + + + +class coreDraw(): + def __init__(self, data=[],needShow=False,subplot_titles={}): + self.data=self.getPlts(data) + self.needShow=needShow + self.subplot_titles=subplot_titles + self.ans=self.getAns() + + + + + + + + def getBarColorList(self,l,colorType): + if colorType=='diffAbs': + ans=['green'] + for i in range(1,len(l)): + if abs(l[i])>abs(l[i-1]): + ans.append('green') + else: + ans.append('red') + elif colorType=='diff': + ans=['green'] + for i in range(1,len(l)): + if (l[i])>(l[i-1]): + ans.append('green') + else: + ans.append('red') + elif colorType=='normal': + ans=[] + for i in range(len(l)): + ans.append('gray') + return ans + + def getPlts(self, data): + ans=None + + if type(data)==list: + ans=[] + for i in data: + ans.append(corePlt(i)) + else: + ans=[corePlt(data)] + + + + + return ans + + def getAns(self): + ''' + data list + vtype + df + row=1 + col=1 + name + + + + ''' + + ans=None + + + + + maxRow=1 + maxCol=1 + for i in self.data: + if i.row > maxRow: + maxRow =i.row + if i.col > maxCol: + maxCol =i.col + + fig = make_subplots( + rows=maxRow, + cols=maxCol, + shared_xaxes=True, + vertical_spacing=0.1, + shared_yaxes=True, + #horizontal_spacing=0.02, + #column_widths=[] + subplot_titles=self.subplot_titles + ) + + + fig.update_layout(xaxis_rangeslider_visible=False) + fig.update_layout(barmode='relative') + + for i in self.data: + if i.vtype=='Scatter': + fig.add_trace(go.Scatter(x=i.df['date'],y=i.df['value'],name=i.name), row=i.row, col=i.col) + elif i.vtype=='OCHL': + fig.add_trace(go.Candlestick( + x=i.df['date'], + open=i.df['open'], + high=i.df['high'], + low=i.df['low'], + close=i.df['close'], + name=i.name), + row=i.row, col=i.col + ) + elif i.vtype=='Bars': + for j in i.df.keys(): + if j!='date': + try: + colorType=i.colorType + except: + colorType='normal' + colors=self.getBarColorList(i.df[j],colorType) + fig.add_trace(go.Bar(x=i.df['date'], 
y=i.df[j],name=j,marker_color=colors),row=i.row, col=i.col) + + + + + ans=fig + if self.needShow: + plotly.offline.iplot(fig) + return ans + + +# In[ ]: + + + + diff --git a/notebooks/autogen/dealManager.py b/notebooks/autogen/dealManager.py new file mode 100644 index 0000000..62b11ee --- /dev/null +++ b/notebooks/autogen/dealManager.py @@ -0,0 +1,145 @@ +#!/usr/bin/env python +# coding: utf-8 + +# In[1]: + + +import pandas as pd +import datetime +import numpy as np +import uuid + + +# In[2]: + + +class DealManager(): + + def __init__(self): + self.commission=0.04 + self.columns=['uuid','figi','amount','startPrice','profit'] + self.deals = pd.DataFrame(columns=self.columns) + self.deals = self.deals.set_index('uuid') + + def findDealByPriceAndFig(self,price,figi): + ans=None + for i in range(self.deals.shape[0]): + if self.deals.iloc[i].startPrice == price and self.deals.iloc[i].figi == figi: + ans = self.deals.iloc[i].name + break + return ans + + def openDeal(self,figi,startPrice,amount=1): + desiredDeal=self.findDealByPriceAndFig(startPrice,figi) + if desiredDeal == None: + newDealDict={ + 'uuid':[str(uuid.uuid4())], + 'figi':[figi], + 'startPrice':[startPrice], + 'amount':[amount] + } + + #newDealDict['profit']=[startPrice*pow(1+self.commission,2)] + + + + newDeal=pd.DataFrame.from_dict(newDealDict).set_index('uuid') + self.deals=pd.concat([self.deals, newDeal]) + else: + self.deals.at[desiredDeal,'amount'] += amount + + def closeDeal(self,uuid,amount): + + desiredDeal=self.deals.loc[uuid] + if desiredDeal.amount - amount == 0: + self.deals = self.deals.drop(labels = [uuid],axis = 0) + else: + self.deals.at[uuid,'amount'] -= amount + #self.deals.loc[uuid].amount = desiredDeal.amount - amount + + + +# In[3]: + + +t=DealManager() +t.__dict__ + + +# In[ ]: + + + + + +# In[4]: + + +t.deals.shape[0] + + +# In[5]: + + +t.openDeal('huigi',100,1) +t.openDeal('huigi',100,3) +t.openDeal('huigi1',100,3) +t.openDeal('huigi1',200,3) + + +# In[6]: + + +t.deals + + +# In[7]: + + +t.deals[t.deals.figi == 'huigi1'] + + +# In[ ]: + + + + + +# In[8]: + + +for i in range(t.deals.shape[0]): + print(t.deals.iloc[i]) + + +# In[9]: + + +t.findDealByPriceAndFig + + +# In[10]: + + +t.closeDeal('78228979-3daf-470a-9c2a-8db180c8c3b0',1) +t.deals + + +# In[11]: + + +t.deals.iloc[0].name + + +# In[12]: + + +a=2 +a==None + + +# In[ ]: + + + + diff --git a/notebooks/autogen/decisionManager.py b/notebooks/autogen/decisionManager.py new file mode 100644 index 0000000..d2b330e --- /dev/null +++ b/notebooks/autogen/decisionManager.py @@ -0,0 +1,358 @@ +#!/usr/bin/env python +# coding: utf-8 + +# In[4]: + + +import pandas as pd +import datetime +import numpy as np + +import pickle +from signals import * +from dealManager import * +from trandeVoter import * +from riskManager import riskManager + + +# In[5]: + + +df_candle = pd.read_csv("../data/EURUSD_price_candlestick.csv") +df_candle.rename(columns={'timestamp': 'date'}, inplace=True) +df_candle + + +# In[6]: + + +class decsionManager(): + + + def __init__(self,name): + self.name = name + self.RM = riskManager() + self.DM = DealManager() + self.TV = trandeVoter(name) + self.SA = signalAgrigator() + pass + + #вытащенный из signalAgrigator метод теста для сигналов + def getSignalTest(self,data: pd.DataFrame(),reqSig: dict, batchSize=30, dataType='candel') -> dict: + + self.SA.mode = 'retroFast' + t.SA.createSingnalInstances( + data = data, + dictAgrigSignal = reqSig, + dataType='candel', + batchSize=30 + ) + ans = t.SA.getAns(data) + return ans + + #метод для генерации 
матрицы вероятностей. + def generateMatrixProbability(self, + data: pd.DataFrame(), + reqSig: dict, + target: str, + batchSize=30, + #dataType='candel' + ): + data=data.reset_index(drop=True) + + t.SA.createSingnalInstances( + data = data, + dictAgrigSignal = reqSig, + dataType='candel', + batchSize=batchSize + ) + self.TV.createMatrixAmounts(reqSig.keys()) + for i in range(data.shape[0]-batchSize-1): + sigAns=self.SA.getAns(data[i:i+batchSize]) + rightAns=self.getRetroStepAns(data[target][i],data[target][i+1]) + self.TV.setDecisionBySignals(self.KostilEbaniy(sigAns),rightAns) + self.TV.generateMatrixProbability() + + #без коментариев блять + def KostilEbaniy(self,d): + ans={} + for i in d.keys(): + if d[i] == 0: + ans[i] = 'none' + elif d[i] == 1: + ans[i] = 'up' + elif d[i] == -1: + ans[i] = 'down' + return ans + + #тож понятная хуита + def getRetroStepAns(self, value1,value2): + + if value1 == value2: + ans = 'none' + elif value1 < value2: + ans = 'up' + else: + ans = 'down' + return ans + + #метод для онлай получения решения по сигналу + def getSignal(self,data: pd.DataFrame(),reqSig: dict, dataType='candel') -> dict: + data=data.reset_index(drop=True) + self.SA.mode = 'online' + t.SA.createSingnalInstances( + data = data, + dictAgrigSignal = reqSig, + dataType='candel', + batchSize=30 + ) + ans = t.SA.getAns(data) + return ans + + + #Создание сигналов. Вызывать перед getOnlineAns + def crateSignals(self,data: pd.DataFrame(),reqSig: dict, dataType='candel'): + data=data.reset_index(drop=True) + self.SA.mode = 'online' + t.SA.createSingnalInstances( + data = data, + dictAgrigSignal = reqSig, + dataType='candel', + batchSize=30 + ) + + + def getOnlineAns(self,data: pd.DataFrame(),price): + sigAns = self.SA.getAns(data) + prob = self.TV.getDecisionBySignals(sigAns) + ans = self.RM.getDecision(sigAns,prob,price) + return ans + + + + + + +# In[ ]: + + + + + +# In[7]: + + +t= decsionManager('TEST') + + +# In[8]: + + +t.__dict__ + + +# In[9]: + + +reqSig={ + 'BB1':{ + 'className':signal_BB, + 'indParams':{'MeanType':'SMA','window':15,'valueType':'close','kDev':2.5}, + 'signalParams':{'source':'close','target':'close'}, + 'batchSize':15 + }, + 'BB2':{ + 'className':signal_BB, + 'indParams':{'MeanType':'SMA','window':15,'valueType':'close','kDev':2.5}, + 'signalParams':{'source':'close','target':'close'}, + 'batchSize':20 + } +} + + +# In[10]: + + +reqSig.keys() + + +# In[11]: + + +t.SA.__dict__ + + +# In[12]: + + +t.generateMatrixProbability(df_candle[:10000],reqSig,'close',40) + + +# In[ ]: + + + + + +# In[ ]: + + + + + +# In[ ]: + + + + + +# In[ ]: + + + + + +# In[ ]: + + + + + +# In[ ]: + + + + + +# In[13]: + + +mop = t.TV.matrixProbability +mop + + +# In[ ]: + + + + + +# In[ ]: + + + + + +# In[14]: + + +t.getSignal(df_candle[:10000],reqSig) + + +# In[15]: + + +t.getSignalTest(df_candle[:10000],reqSig,40) + + +# In[ ]: + + + + + +# In[16]: + + +t.SA.createSingnalInstances( + data = df_candle[:10000], + dictAgrigSignal = reqSig, + dataType='candel', + batchSize=30 +) + + +# In[ ]: + + + + + +# In[ ]: + + + + + +# In[ ]: + + + + + +# In[17]: + + +reqSig={ + 'BB1':{ + 'className':signal_BB, + 'indParams':{'MeanType':'SMA','window':15,'valueType':'close','kDev':2.5}, + 'signalParams':{'source':'close','target':'close'}, + 'batchSize':15 + }, + 'BB2':{ + 'className':signal_BB, + 'indParams':{'MeanType':'SMA','window':15,'valueType':'close','kDev':2.5}, + 'signalParams':{'source':'close','target':'close'}, + 'batchSize':20 + } +} + + +# In[18]: + + +t=decsionManager(reqSig) + + +# In[ ]: + + + + 
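Side note (illustrative sketch, not project code): the retro pass in generateMatrixProbability reduces to a sliding window over the candles, asking the signals for an answer on each batch and labelling the step with the realised move of the target column. Here evaluate_signals is a hypothetical stand-in for signalAgrigator.getAns.

import pandas as pd

def label_step(value_now, value_next):
    # mirrors getRetroStepAns: compare the target value with the next candle's value
    if value_now < value_next:
        return 'up'
    if value_now > value_next:
        return 'down'
    return 'none'

def retro_pass(data: pd.DataFrame, target: str, batch_size: int, evaluate_signals):
    # evaluate_signals(window) is assumed to return e.g. {'BB1': 1, 'BB2': -1}
    data = data.reset_index(drop=True)
    observations = []
    for i in range(data.shape[0] - batch_size - 1):
        window = data[i:i + batch_size]
        signal_answers = evaluate_signals(window)
        # as in the original, the label is taken from data[target][i] vs data[target][i + 1]
        observations.append((signal_answers, label_step(data[target][i], data[target][i + 1])))
    return observations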
+ +# In[ ]: + + +import pickle + + +# In[ ]: + + +dictionary_data = {"a": 1, "b": 2} + + +a_file = open("data.pkl", "wb") + +pickle.dump(reqSig, a_file) + +a_file.close() + + +a_file = open("data.pkl", "rb") + +output = pickle.load(a_file) + +print(output) + + + +a_file.close() + + +# In[ ]: + + + + diff --git a/notebooks/autogen/decisionManager_v2.py b/notebooks/autogen/decisionManager_v2.py new file mode 100644 index 0000000..6817807 --- /dev/null +++ b/notebooks/autogen/decisionManager_v2.py @@ -0,0 +1,382 @@ +#!/usr/bin/env python +# coding: utf-8 + +# In[5]: + + +import os + +import pandas as pd +import datetime +import numpy as np + +from tqdm import tqdm + +from indicators_v2 import * +from signals_v2 import * +from dealManager import * +from trandeVoter import * +from riskManager import * +import pickle + + +# In[6]: + + +df_candle = pd.read_csv("../data/EURUSD_price_candlestick.csv") +df_candle.rename(columns={'timestamp': 'date'}, inplace=True) +df_candle + + +# In[7]: + + +df_candle['close'] + + +# In[8]: + + +class decsionManager: + ''' +sigAgrReq = { + 'sig_BB':{ + 'className':sig_BB, + 'params':{'source':'close','target':'close'}, + 'indicators':{ + 'ind_BB':{ + 'className':ind_BB, + 'params':{'MeanType':'SMA','window':30,'valueType':'close','kDev':2.5} + } + } + }, + 'sig_BB_2':{ + 'className':sig_BB, + 'params':{'source':'close','target':'close'}, + 'indicators':{ + 'ind_BB':{ + 'className':ind_BB, + 'params':{'MeanType':'SMA','window':30,'valueType':'close','kDev':2} + } + } + } +} + +sigAgrData = { + 'sig_BB':{ + 'signalData': df_candle[990:1000], + 'indicatorData' :{'ind_BB': df_candle[:1000]} + }, + 'sig_BB_2':{ + 'signalData': df_candle[990:1000], + 'indicatorData' :{'ind_BB': df_candle[:1000]} + } +} + + +sigAgrRetroTemplate = { + 'sig_BB':{ + 'signalData': None, + 'indicatorData' :{'ind_BB': None} + }, + 'sig_BB_2':{ + 'signalData': None, + 'indicatorData' :{'ind_BB': None} + } +} + + + + + + + ''' + + + + + def __init__(self,name, sigDict: dict): + self.RM = riskManager() + self.DM = DealManager() + self.TV = trandeVoter(name) + self.SA = signalsAgrigator(sigDict) + self.sigDict = sigDict + + + def getOnlineAns(self, signalsAns: dict, price: float) -> dict: + probabilityDecsion = self.TV.getDecisionBySignals(self.getSignalsAns(signalsAns)) + RMD = self.RM.getDecision(probabilityDecision=probabilityDecsion, price=price, deals = self.DM.deals) + return RMD + + def getSignalsAns(self, signalsDataDict: dict) -> dict: + return self.SA.getAns(signalsDataDict) + + def getRightAns(self,value_1, value_2): + + ans='' + + if value_1 > value_2: + ans = 'down' + elif value_1 < value_2: + ans = 'up' + else: + ans = 'none' + + return ans + + def getRetroTrendAns(self, retroTemplateDict: dict, data: pd.DataFrame(), window: int) -> list: + + reqSig={} + ans = { + 'signalsAns':[], + 'rightAns':[] + + } + target = '' + + + for k in tqdm(range(data.shape[0]-window-1)): + for i in retroTemplateDict.keys(): + reqSig[i] = {'signalData': data[k:k+window], 'indicatorData':{}} + target = self.SA.signals[i].params['target'] + for j in retroTemplateDict[i]['indicatorData'].keys(): + reqSig[i]['indicatorData'][j] = data[k:k+window] + + sigAns = self.getSignalsAns(reqSig) + rightAns = self.getRightAns(data[target][k], data[target][k+1]) + + ans['signalsAns'].append(sigAns) + ans['rightAns'].append(rightAns) + + return ans + + + def generateMatrixProbabilityFromDict(self, dictSignals: dict) -> dict: + self.TV.createMatrixAmounts(dictSignals['signalsAns'][0].keys()) + for i in 
range(len(dictSignals['signalsAns'])): + self.TV.setDecisionBySignals(signalDecisions = dictSignals['signalsAns'][i], + trande = dictSignals['rightAns'][i]) + self.TV.generateMatrixProbability() + + def createDump(self,postfix='') -> str: + dataDict = { + 'RM':self.RM, + 'DM':self.DM, + 'TV':self.TV, + 'SA':self.SA, + 'sigDict':self.sigDict + } + fileName='data_'+postfix+'.pickle' + with open(fileName, 'wb') as f: + pickle.dump(dataDict, f) + + return os.path.abspath(fileName) + + def loadDump(self,path: str) -> None: + + with open(path, 'rb') as f: + dataDict = pickle.load(f) + + self.RM = dataDict['RM'] + self.DM = dataDict['DM'] + self.TV = dataDict['TV'] + self.SA = dataDict['SA'] + self.sigDict = dataDict['sigDict'] + + +# In[9]: + + +sigAgrReq = { + 'sig_BB':{ + 'className':sig_BB, + 'params':{'source':'close','target':'close'}, + 'indicators':{ + 'ind_BB':{ + 'className':ind_BB, + 'params':{'MeanType':'SMA','window':30,'valueType':'close','kDev':2.5} + } + } + }, + 'sig_BB_2':{ + 'className':sig_BB, + 'params':{'source':'close','target':'close'}, + 'indicators':{ + 'ind_BB':{ + 'className':ind_BB, + 'params':{'MeanType':'SMA','window':30,'valueType':'close','kDev':2} + } + } + } +} + +sigAgrData = { + 'sig_BB':{ + 'signalData': df_candle[990:1000], + 'indicatorData' :{'ind_BB': df_candle[:1000]} + }, + 'sig_BB_2':{ + 'signalData': df_candle[990:1000], + 'indicatorData' :{'ind_BB': df_candle[:1000]} + } +} + + +sigAgrRetroTemplate = { + 'sig_BB':{ + 'signalData': None, + 'indicatorData' :{'ind_BB': None} + }, + 'sig_BB_2':{ + 'signalData': None, + 'indicatorData' :{'ind_BB': None} + } +} + + +# In[10]: + + +test = decsionManager('Pipa', sigAgrReq) + + +# In[11]: + + +test.__dict__ + + +# In[12]: + + +test.TV.__dict__ + + +# In[13]: + + +test.SA.signals['sig_BB'].params['target'] + + +# In[14]: + + +test.getSignalsAns(sigAgrData) + + +# In[15]: + + +#test.loadDump('C:\\Users\\Redsandy\\PyProj\\Trade\\MVP\\data_pupa.pickle') + + +# In[16]: + + +uuu = test.getRetroTrendAns(sigAgrRetroTemplate,df_candle[:5000],40) +uuu + + +# In[17]: + + +test.generateMatrixProbabilityFromDict(uuu) + + +# In[18]: + + +test.TV.__dict__ + + +# In[19]: + + +test.getOnlineAns(sigAgrData, 0.0) + + +# In[20]: + + +(test.DM.deals).shape + + +# In[21]: + + +test.createDump('pupa') + + +# In[ ]: + + + + + +# In[22]: + + +with open('C:\\Users\\Redsandy\\PyProj\\Trade\\MVP\\data_pupa.pickle', 'rb') as f: + data_new = pickle.load(f) +data_new + + +# In[ ]: + + + + + +# In[ ]: + + + + + +# In[ ]: + + + + + +# In[ ]: + + + + + +# In[ ]: + + + + + +# In[ ]: + + + + + +# In[ ]: + + + + + +# In[ ]: + + + + + +# In[ ]: + + + + + +# In[ ]: + + + + diff --git a/notebooks/autogen/indicators_v2.py b/notebooks/autogen/indicators_v2.py new file mode 100644 index 0000000..5c74fac --- /dev/null +++ b/notebooks/autogen/indicators_v2.py @@ -0,0 +1,194 @@ +#!/usr/bin/env python +# coding: utf-8 + +# In[8]: + + +import pandas as pd +import datetime +import numpy as np + +import CoreTraidMath + + +# In[9]: + + +df_candle = pd.read_csv(r"../data/EURUSD_price_candlestick.csv") +df_candle.rename(columns={'timestamp': 'date'}, inplace=True) +df_candle + + +# In[10]: + + +class coreIndicator(): + + def __init__(self,options: dict, dataType: str = None, predictType: str = None, name: str = None): + self.options = options + self.dataType = dataType #ochl + self.predictType = predictType #trend + + + def getAns(self, data: pd.DataFrame() ): + return "ERROR" + + +# In[11]: + + +class ind_BB(coreIndicator): + """ + options + MeanType -> SMA + 
window -> int + valueType -> str: low, high, open, close + kDev -> float + + """ + + def __init__(self,options: dict,name = None): + super().__init__( + options = options, + dataType = 'ochl', + predictType = 'trend', + name = name + ) + + def getAns(self, data: pd.DataFrame()): + data=data.reset_index(drop=True) + ans={} + opMA={'dataType':'ohcl', + 'action':'findMean', + 'actionOptions':{ + 'MeanType':self.options['MeanType'], + 'valueType':self.options['valueType'], + 'window':self.options['window'] + } + } + ans['BB']=CoreTraidMath.CoreMath(data,opMA).ans + opSTD={'dataType':'ohcl', + 'action':'findSTD', + 'actionOptions':{'valueType':self.options['valueType'],'window':self.options['window']} + } + ans['STD']=CoreTraidMath.CoreMath(data,opSTD).ans + ans['pSTD']=ans['BB']+ans['STD']*self.options['kDev'] + ans['mSTD']=ans['BB']-ans['STD']*self.options['kDev'] + ans['x']=np.array(data['date'][self.options['window']-1:].to_list()) + self.ans= ans + return ans + + +# In[12]: + + +class indicatorsAgrigator: + + def __init__ (self,indDict={}): + self.indDict = indDict + self.indInst = {} + self.ans={} + self.createIndicatorsInstance() + + def createIndicatorsInstance(self): + for i in self.indDict.keys(): + self.indInst[i]=self.indDict[i]['className'](self.indDict[i]['params']) + + def getAns(self,dataDict={}): + ans={} + for i in dataDict.keys(): + ans[i] = self.indInst[i].getAns(dataDict[i]) + return ans + + +# In[13]: + + +indicators = { + 'ind_BB':{ + 'className':ind_BB, + 'params':{'MeanType':'SMA','window':15,'valueType':'close','kDev':2.5} + } +} +dataDic={ + 'ind_BB':df_candle[:1000] +} + + +# In[ ]: + + + + + +# In[14]: + + +ia= indicatorsAgrigator(indicators) + + +# In[15]: + + +ia.__dict__ + + +# In[16]: + + +ia.indInst['ind_BB'].__dict__ + + +# In[17]: + + +ia.getAns(dataDict=dataDic) + + +# In[ ]: + + + + + +# In[ ]: + + + + + +# In[18]: + + +op = {'MeanType':'SMA','window':5,'valueType':'low','kDev':2} + + +# In[19]: + + +t = ind_BB(op) + + +# In[20]: + + +t.getAns(df_candle[:100]) + + +# In[21]: + + +t.__dict__ + + +# In[ ]: + + + + + +# In[ ]: + + + + diff --git a/poetry.lock b/poetry.lock index 2a88a35..c9a17ec 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
[[package]] name = "anyio" @@ -1099,6 +1099,7 @@ optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" files = [ {file = "jsonpointer-2.4-py2.py3-none-any.whl", hash = "sha256:15d51bba20eea3165644553647711d150376234112651b4f1811022aecad7d7a"}, + {file = "jsonpointer-2.4.tar.gz", hash = "sha256:585cee82b70211fa9e6043b7bb89db6e1aa49524340dde8ad6b63206ea689d88"}, ] [[package]] @@ -1598,6 +1599,16 @@ files = [ {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"}, @@ -1754,13 +1765,13 @@ test = ["flaky", "ipykernel (>=6.19.3)", "ipython", "ipywidgets", "nbconvert (>= [[package]] name = "nbconvert" -version = "7.12.0" -description = "Converting Jupyter Notebooks" +version = "7.16.2" +description = "Converting Jupyter Notebooks (.ipynb files) to other formats. Output formats include asciidoc, html, latex, markdown, pdf, py, rst, script. nbconvert can be used both as a Python library (`import nbconvert`) or as a command line tool (invoked as `jupyter nbconvert ...`)." 
optional = false python-versions = ">=3.8" files = [ - {file = "nbconvert-7.12.0-py3-none-any.whl", hash = "sha256:5b6c848194d270cc55fb691169202620d7b52a12fec259508d142ecbe4219310"}, - {file = "nbconvert-7.12.0.tar.gz", hash = "sha256:b1564bd89f69a74cd6398b0362da94db07aafb991b7857216a766204a71612c0"}, + {file = "nbconvert-7.16.2-py3-none-any.whl", hash = "sha256:0c01c23981a8de0220255706822c40b751438e32467d6a686e26be08ba784382"}, + {file = "nbconvert-7.16.2.tar.gz", hash = "sha256:8310edd41e1c43947e4ecf16614c61469ebc024898eb808cce0999860fc9fb16"}, ] [package.dependencies] @@ -1787,7 +1798,7 @@ docs = ["ipykernel", "ipython", "myst-parser", "nbsphinx (>=0.2.12)", "pydata-sp qtpdf = ["nbconvert[qtpng]"] qtpng = ["pyqtwebengine (>=5.15)"] serve = ["tornado (>=6.1)"] -test = ["flaky", "ipykernel", "ipywidgets (>=7)", "pytest"] +test = ["flaky", "ipykernel", "ipywidgets (>=7.5)", "pytest"] webpdf = ["playwright"] [[package]] @@ -2483,6 +2494,7 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, @@ -3386,4 +3398,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.13" -content-hash = "29bb47a642b8c954617e72b3b407196c5cc1e13c7eb483c712538b08c3d4b8e7" +content-hash = "8e185f19d0891fa375ca3e5d878742e677c332f20e5969c6e058af9ba5c41e2e" diff --git a/pyproject.toml b/pyproject.toml index b2c2ad3..23ef214 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -21,6 +21,7 @@ black = "^23.12.0" tinkoff-grpc = {git = "git@github.com:strategy155/tinkoff_grpc.git", branch="master"} python-dotenv = "^1.0.0" jupyterlab = "^4.0.9" +nbconvert = "^7.16.2" [tool.poetry.dev-dependencies]
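Closing note: the new notebooks/autogen/*.py files have the shape of `jupyter nbconvert --to script` output (the `# In[N]:` cell markers), which is presumably why nbconvert is now pinned in pyproject.toml. A hedged sketch of regenerating them programmatically; the paths and the workflow are assumptions, not documented in the repo.

# Assumed CLI equivalent:
#   jupyter nbconvert --to script notebooks/*.ipynb --output-dir notebooks/autogen

from pathlib import Path
from nbconvert import PythonExporter

exporter = PythonExporter()
out_dir = Path("notebooks/autogen")          # illustrative output directory
out_dir.mkdir(parents=True, exist_ok=True)

for nb_path in Path("notebooks").glob("*.ipynb"):
    # from_filename returns (script body, resources); only the body is written out
    body, _resources = exporter.from_filename(str(nb_path))
    (out_dir / f"{nb_path.stem}.py").write_text(body, encoding="utf-8")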