How to Create a Dashboard to Dominate the Stock Market Using Python and Dash

Free Options Order Flow, Prices, Fundamentals, Chatter all in one

Eric Kleppen
Sep 4 · 24 min read
Image for post
Image for post
Photo by Andrew Neel on Unsplash

Keep Your Eyes on the Big Picture

Contents:

Reviewing the Dashboard

Image for post
Image for post
The dashboard (zoomed out for a smaller image)
Image for post
Image for post
Input fields
Image for post
Image for post
1 Minute chart

Sourcing the Data

Image for post
Image for post
Data sources feeding the Dash App

Yahoo! Finance

pip install yfinance

Market Watch

Image for post
Image for post
Marketwatch financials tab
pip install beautifulsoup4

Twitter

pip install tweepy

Reddit

pip install praw

Dash Framework Refresher

pip install dash

Layout

Callbacks

Dash Bootstrap Components

pip install dash-bootstrap-components

The File Structure

Image for post
Image for post
File structure

File config.py & Managing API Keys

File stocks.sqlite

Creating the Files

Import dependencies

import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
import dash_table
from dash.exceptions import PreventUpdate
import flask
from flask import Flask
import pandas as pd
import dateutil.relativedelta
from datetime import date
import datetime
import yfinance as yf
import numpy as np
import praw
import sqlite3
import plotly
import plotly.graph_objects as go
from plotly.subplots import make_subplots

Create dash_utils.py

ticker_inputs

def ticker_inputs(inputID, pickerID, MONTH_CUTTOFF):
    """Return a Div with a ticker text input and a date-range picker.

    The picker's earliest selectable date (and its initial start date)
    is MONTH_CUTTOFF months before today.
    """
    today = date.today()
    earliest = today - dateutil.relativedelta.relativedelta(months=MONTH_CUTTOFF)

    text_box = dcc.Input(id=inputID, type="text", placeholder="MSFT")
    picker = dcc.DatePickerRange(
        id=pickerID,
        min_date_allowed=earliest,
        start_date=earliest,
        # end_date deliberately unset; the user picks it in the UI
    )
    return html.Div([text_box, html.P(" "), picker])

make_card

def make_card(alert_message, color, cardbody, style_dict=None):
    """Wrap *cardbody* in a dbc.Card headed by a colored dbc.Alert.

    style_dict, when given, is applied as the Card's inline style.
    """
    header = dbc.Alert(alert_message, color=color)
    body = dbc.CardBody(cardbody)
    return dbc.Card([header, body], style=style_dict)

make_item

Image for post
Image for post
Price chart Accordion
def make_item(button, cardbody, i):
    """Build one accordion item: a header button (id "group-{i}-toggle")
    plus a collapsible body (id "collapse-{i}") that a callback elsewhere
    opens and closes.
    """
    toggle = dbc.Button(button, color="link", id=f"group-{i}-toggle")
    header = dbc.CardHeader(html.H2(toggle))
    collapse = dbc.Collapse(dbc.CardBody(cardbody), id=f"collapse-{i}")
    return dbc.Card([header, collapse])

make_table

def make_table(id, dataframe, lineHeight='17px', page_size=5):
    """Build a dash DataTable over *dataframe* with server-side ('custom')
    paging, sorting and filtering.

    Column widths are tuned for the reddit/twitter frames used in this app
    (title/datetime narrow, post/text wide).
    """
    column_defs = [{"name": c, "id": c} for c in dataframe.columns]
    odd_row_shading = [{
        'if': {'row_index': 'odd'},
        'backgroundColor': 'rgb(248, 248, 248)',
    }]
    width_overrides = [
        {'if': {'column_id': 'title'}, 'width': '130px'},
        {'if': {'column_id': 'post'}, 'width': '500px'},
        {'if': {'column_id': 'datetime'}, 'width': '130px'},
        {'if': {'column_id': 'text'}, 'width': '500px'},
    ]
    return dash_table.DataTable(
        id=id,
        css=[{'selector': '.row', 'rule': 'margin: 0'}],
        columns=column_defs,
        style_header={'backgroundColor': 'rgb(230, 230, 230)',
                      'fontWeight': 'bold'},
        style_cell={'textAlign': 'left'},
        style_data={'whiteSpace': 'normal',
                    'height': 'auto',
                    'lineHeight': lineHeight},
        style_data_conditional=odd_row_shading,
        style_cell_conditional=width_overrides,
        # 'custom' actions mean the data callbacks do all paging/sort/filter
        page_current=0,
        page_size=page_size,
        page_action='custom',
        filter_action='custom',
        filter_query='',
        sort_action='custom',
        sort_mode='multi',
        sort_by=[],
    )

Create fin_report_data.py

def get_financial_report(ticker):
    """Scrape MarketWatch income-statement and balance-sheet rows for
    *ticker* and return them as a DataFrame indexed by the last 5 years.

    Uses requests + BeautifulSoup and the module-level helper get_element.
    """
    base = 'https://www.marketwatch.com/investing/stock/' + ticker
    soup_financials = BeautifulSoup(requests.get(base + '/financials').text, "html")
    soup_balance = BeautifulSoup(requests.get(base + '/financials/balance-sheet').text, "html")

    def _row_values(title):
        # every non-empty value cell to the right of a labelled row title
        return [td.text for td in title.findNextSiblings(attrs={'class': 'valueCell'}) if td.text]

    # collect income-statement rows by label
    epslist = []
    netincomelist = []
    longtermdebtlist = []
    interestexpenselist = []
    ebitdalist = []
    for title in soup_financials.findAll('td', {'class': 'rowTitle'}):
        label = title.text
        if 'EPS (Basic)' in label:
            epslist.append(_row_values(title))
        if 'Net Income' in label:
            netincomelist.append(_row_values(title))
        if 'Interest Expense' in label:
            interestexpenselist.append(_row_values(title))
        if 'EBITDA' in label:
            ebitdalist.append(_row_values(title))

    # collect balance-sheet rows by label
    equitylist = []
    for title in soup_balance.findAll('td', {'class': 'rowTitle'}):
        label = title.text
        if "Total Shareholders' Equity" in label:
            equitylist.append(_row_values(title))
        if 'Long-Term Debt' in label:
            longtermdebtlist.append(_row_values(title))

    # get_element returns '-' when a row was not found on the page
    this_year = date.today().year
    fin_df = pd.DataFrame(
        {'eps': get_element(epslist, 0),
         'eps Growth': get_element(epslist, 1),
         'net Income': get_element(netincomelist, 0),
         'shareholder Equity': get_element(equitylist, 0),
         'roa': get_element(equitylist, 1),
         'longterm Debt': get_element(longtermdebtlist, 0),
         'interest Expense': get_element(interestexpenselist, 0),
         'ebitda': get_element(ebitdalist, 0)},
        index=range(this_year - 5, this_year))
    fin_df.reset_index(inplace=True)
    return fin_df
#helper function
def get_element(list, element):
    """Return row *element* from the scraped rows *list*, or the
    placeholder '-' when that row is missing.

    The parameter name `list` shadows the builtin; kept unchanged for
    backward compatibility with existing callers.
    """
    try:
        return list[element]
    except (IndexError, TypeError):
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        return '-'

Create reddit_data.py

import pandas as pd
import praw
from config import r_cid, r_csec, r_uag
def get_reddit(cid= r_cid, csec= r_csec, uag= r_uag, subreddit='wallstreetbets'):
    """Fetch the newest posts from *subreddit* and return a DataFrame
    with columns title / score / post.

    Credentials default to the values imported from config.
    """
    api = praw.Reddit(client_id=cid, client_secret=csec, user_agent=uag)
    newest = api.subreddit(subreddit).new(limit=None)
    rows = [[post.title, post.score, post.selftext] for post in newest]
    return pd.DataFrame(rows, columns=['title', 'score', 'post'])

Create twitter_data.py

def get_all_tweets(screen_name
    , consumer_key=t_conkey
    , consumer_secret=t_consec
    , access_key=t_akey
    , access_secret=t_asec
    ):
    """Download the most recent tweets for *screen_name* (Twitter caps
    this method at roughly the last 3240 tweets).

    Returns a DataFrame with columns time (id string), datetime, text.
    """
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # all fetched tweepy Tweet objects
    alltweets = []

    # initial request; 200 is the maximum allowed per call
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    alltweets.extend(new_tweets)

    # Fix: the original indexed alltweets[-1] unconditionally, raising
    # IndexError for an account with zero tweets.
    if not alltweets:
        return pd.DataFrame(columns=['time', 'datetime', 'text'])

    # id of the oldest tweet seen, minus one — pages backwards via max_id
    oldest = alltweets[-1].id - 1
    while len(new_tweets) > 0:
        # max_id prevents duplicates on subsequent requests
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        alltweets.extend(new_tweets)
        oldest = alltweets[-1].id - 1

    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    tweets_df = pd.DataFrame(outtweets, columns=['time', 'datetime', 'text'])
    return tweets_df
def get_options_flow():
    """Pull order-flow tweets from SwaggyStocks and unusual_whales, clean
    the text, and overwrite the `tweets` table in stocks.sqlite."""
    # fetch both accounts' timelines
    ss = get_all_tweets(screen_name="SwaggyStocks")
    uw = get_all_tweets(screen_name="unusual_whales")

    # clean the text data
    ss['source'] = 'swaggyStocks'
    ss['text'] = hero.remove_urls(ss['text'])
    ss['text'] = [n.replace('$', '') for n in ss['text']]

    uw['source'] = 'unusual_whales'
    uw['text'] = hero.remove_urls(uw['text'])
    uw['text'] = [n.replace('$', '') for n in uw['text']]
    uw['text'] = [n.replace(':', '') for n in uw['text']]
    uw['text'] = [n.replace('\n', ' ') for n in uw['text']]
    # NOTE(review): this replaces a space with a space — the article's
    # formatting likely collapsed a double space; confirm intended pattern.
    uw['text'] = [n.replace(' ', ' ') for n in uw['text']]

    tweets = pd.concat([ss, uw])
    # Fix: close the sqlite connection instead of leaking it.
    conn = sqlite3.connect('stocks.sqlite')
    try:
        tweets.to_sql('tweets', conn, if_exists='replace')
    finally:
        conn.close()
    print('done')

Create index.py

# ---- app bootstrap ----------------------------------------------------
# sqlite connection shared by the module-level query below
conn = sqlite3.connect('stocks.sqlite')
# host Dash on an explicit Flask server for easier deployment
server = Flask(__name__)
app = dash.Dash(
    __name__,
    server=server,
    meta_tags=[{"content": "width=device-width"}],
    external_stylesheets=[dbc.themes.BOOTSTRAP],
)
# layout components are created by callbacks, so suppress missing-id errors
app.config.suppress_callback_exceptions = True

# refresh the tweets table from twitter, then read it for the initial layout
get_options_flow()
flow = pd.read_sql("select datetime, text from tweets order by datetime desc", conn)

# initial reddit posts (`global` at module scope is a no-op; kept for parity)
global dfr
dfr = get_reddit()
Image for post
Image for post
Showing how the layout components are wrapped within each other to produce the desired layout.
# row 1: ticker entry
_ticker_row = dbc.Row([
    dbc.Col(make_card("Enter Ticker", "success",
                      ticker_inputs('ticker-input', 'date-picker', 36)))
])

# row 2: twitter order flow + financial report table
_tables_row = dbc.Row([
    dbc.Col([make_card("Twitter Order Flow", 'primary',
                       make_table('table-sorting-filtering2', flow, '17px', 10))]),
    dbc.Col([make_card("Fin table ", "secondary", html.Div(id="fin-table"))]),
])

# row 3: placeholder cards, replaced by the 'cards' callback
_cards_row = dbc.Row([make_card("select ticker", "warning", "select ticker")],
                     id='cards')

# row 4, left: wallstreetbets posts with a manual refresh button
_reddit_col = dbc.Col([
    dbc.Row([make_card("Wallstreet Bets New Posts", 'primary',
                       [html.P(html.Button('Refresh', id='refresh')),
                        make_table('table-sorting-filtering', dfr, '17px', 4)])],
            justify='center')
])

# row 4, right: charts, two refresh intervals, and the tweet-reload note
_charts_col = dbc.Col([
    dbc.Row([dbc.Alert("_Charts_", color="primary")], justify='center'),
    dbc.Row(html.Div(id='x-vol-1'), justify='center'),
    dcc.Interval(id='interval-component', interval=1 * 150000, n_intervals=0),   # ms
    dcc.Interval(id='interval-component2', interval=1 * 60000, n_intervals=0),   # ms
    dbc.Row([html.Div(id='tweets')]),
])

layout1 = html.Div([
    _ticker_row,
    _tables_row,
    _cards_row,
    dbc.Row([_reddit_col, _charts_col]),
])
app.layout = layout1
Image for post
Image for post
Table inside the Card

Adding Callbacks

Refreshing Twitter Data

@app.callback(Output('tweets', 'children'),
              [Input('interval-component2', 'n_intervals')])
def new_tweets(n):
    """Re-pull twitter order flow into sqlite on every interval tick; the
    returned paragraph doubles as a visible reload counter."""
    get_options_flow()
    return html.P(f"Reloaded Tweets {n}")

Loading the Company Info Cards

@app.callback(Output('cards', 'children'),
              [Input('ticker-input', 'value')])
def refresh_cards(ticker):
    """Rebuild the company-info cards whenever the ticker input changes.

    Fixes two bugs: the original called ticker.upper() *before* the None
    check (AttributeError when the callback first fires with value=None),
    and its None branch set TICKER to the string 'MSFT', which has no
    .info attribute.
    """
    if ticker is None:
        ticker = 'MSFT'  # sensible default before any user input
    TICKER = yf.Ticker(ticker.upper())
    info = TICKER.info  # fetch once; each .info access may hit the network

    cards = [
        dbc.Col(make_card("Previous Close ", "secondary", info['previousClose'])),
        dbc.Col(make_card("Open", "secondary", info['open'])),
        dbc.Col(make_card("Sector", 'secondary', info['sector'])),
        dbc.Col(make_card("Beta", 'secondary', info['beta'])),
        dbc.Col(make_card("50d Avg Price", 'secondary', info['fiftyDayAverage'])),
        dbc.Col(make_card("Avg 10d Vol", 'secondary', info['averageVolume10days'])),
    ]  # end cards list
    return cards

Sorting and Filtering Reddit and Twitter tables

@app.callback(
    Output('table-sorting-filtering', 'data'),
    [Input('table-sorting-filtering', "page_current"),
     Input('table-sorting-filtering', "page_size"),
     Input('table-sorting-filtering', 'sort_by'),
     Input('table-sorting-filtering', 'filter_query'),
     Input('refresh', 'n_clicks')])
def update_table(page_current, page_size, sort_by, filter, n_clicks):
    """Server-side paging/sorting/filtering for the reddit table;
    refetches posts whenever any table property or Refresh changes.

    Fix: raise PreventUpdate *before* doing any work — the original
    split the filter string first.
    """
    if n_clicks is None:
        raise PreventUpdate

    dff = get_reddit()
    for filter_part in filter.split(' && '):
        col_name, operator, filter_value = split_filter_part(filter_part)
        if operator in ('eq', 'ne', 'lt', 'le', 'gt', 'ge'):
            # these operators match pandas Series comparison method names
            dff = dff.loc[getattr(dff[col_name], operator)(filter_value)]
        elif operator == 'contains':
            dff = dff.loc[dff[col_name].str.contains(filter_value)]
        elif operator == 'datestartswith':
            # simplification of the front-end logic: only works with
            # complete fields in standard format
            dff = dff.loc[dff[col_name].str.startswith(filter_value)]

    if len(sort_by):
        dff = dff.sort_values(
            [col['column_id'] for col in sort_by],
            ascending=[col['direction'] == 'asc' for col in sort_by],
            inplace=False)

    start = page_current * page_size
    return dff.iloc[start:start + page_size].to_dict('records')

Populating the Financial Report

@app.callback(Output('fin-table', 'children'),
              [Input('ticker-input', 'value')])
def fin_report(sym):
    """Render the MarketWatch financials for *sym* as a Bootstrap table.

    Fix: the callback fires once with value=None before any user input;
    the original crashed there on sym.upper().
    """
    if not sym:
        raise PreventUpdate
    df = get_financial_report(sym.upper())
    table = dbc.Table.from_dataframe(df, striped=True,
                                     bordered=True, hover=True)
    return table

Populating the Charts

Image for post
Image for post
Example Plotly Candlestick chart
@app.callback(Output('x-vol-1', 'children'),
              [Input('ticker-input', 'value'),
               Input('date-picker', 'start_date'),
               Input('date-picker', 'end_date'),
               Input('interval-component', 'n_intervals')])
def create_graph(ticker, startdate, enddate, n):
    """Build the three-candlestick-chart accordion for *ticker*.

    Fix: the callback fires with value=None before any user input; the
    original crashed there on ticker.upper().
    """
    if not ticker:
        raise PreventUpdate
    ticker = ticker.upper()

    def _candles(frame, xcol):
        # one candlestick figure from a yfinance OHLC frame
        return go.Figure(data=[go.Candlestick(
            x=frame[xcol],
            open=frame['Open'], high=frame['High'],
            low=frame['Low'], close=frame['Close'])])

    # daily bars over the picked date range
    df1 = yf.download(ticker, startdate, enddate)
    df1.reset_index(inplace=True)
    fig1 = _candles(df1, 'Date')

    # NOTE(review): the accordion label below says "5m" but this download
    # uses a 1-minute interval — confirm which was intended.
    df2 = yf.download(ticker, period="5d", interval="1m")
    df2.reset_index(inplace=True)
    fig2 = _candles(df2, 'Datetime')

    df3 = yf.download(ticker, period="1d", interval="1m")
    df3.reset_index(inplace=True)
    fig3 = _candles(df3, 'Datetime')

    accordion = html.Div([make_item("Daily Chart", dcc.Graph(figure=fig1), 1),
                          make_item("5d 5m Chart", dcc.Graph(figure=fig2), 2),
                          make_item("1d 1m Chart", dcc.Graph(figure=fig3), 3)],
                         className="accordion")
    return accordion
Image for post
Image for post
Photo by Viacheslav Bublyk on Unsplash

Final Thoughts and Complete Code

The Code

Dash_utils.py

import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
import dash_table
import flask
from flask import Flask
import pandas as pd
import dateutil.relativedelta
from datetime import date
import datetime
import yfinance as yf
import numpy as np
import praw
import sqlite3
import plotly
import plotly.graph_objects as go
from plotly.subplots import make_subplots
def make_table(id, dataframe, lineHeight='17px', page_size=5):
    """Build a dash DataTable over *dataframe* with server-side ('custom')
    paging, sorting and filtering; widths are tuned for the reddit/twitter
    frames used by this app.
    """
    column_defs = [{"name": c, "id": c} for c in dataframe.columns]
    odd_row_shading = [{
        'if': {'row_index': 'odd'},
        'backgroundColor': 'rgb(248, 248, 248)',
    }]
    width_overrides = [
        {'if': {'column_id': 'title'}, 'width': '130px'},
        {'if': {'column_id': 'post'}, 'width': '500px'},
        {'if': {'column_id': 'datetime'}, 'width': '130px'},
        {'if': {'column_id': 'text'}, 'width': '500px'},
    ]
    return dash_table.DataTable(
        id=id,
        css=[{'selector': '.row', 'rule': 'margin: 0'}],
        columns=column_defs,
        style_header={'backgroundColor': 'rgb(230, 230, 230)',
                      'fontWeight': 'bold'},
        style_cell={'textAlign': 'left'},
        style_data={'whiteSpace': 'normal',
                    'height': 'auto',
                    'lineHeight': lineHeight},
        style_data_conditional=odd_row_shading,
        style_cell_conditional=width_overrides,
        page_current=0,
        page_size=page_size,
        page_action='custom',
        filter_action='custom',
        filter_query='',
        sort_action='custom',
        sort_mode='multi',
        sort_by=[],
    )
def make_card(alert_message, color, cardbody, style_dict=None):
    """Wrap *cardbody* in a dbc.Card headed by a colored dbc.Alert."""
    header = dbc.Alert(alert_message, color=color)
    body = dbc.CardBody(cardbody)
    return dbc.Card([header, body], style=style_dict)  # end card
def ticker_inputs(inputID, pickerID, MONTH_CUTTOFF):
    """Return a Div with a ticker text input and a date-range picker whose
    earliest (and initial start) date is MONTH_CUTTOFF months back."""
    today = date.today()
    earliest = today - dateutil.relativedelta.relativedelta(months=MONTH_CUTTOFF)

    text_box = dcc.Input(id=inputID, type="text", placeholder="MSFT")
    picker = dcc.DatePickerRange(
        id=pickerID,
        min_date_allowed=earliest,
        start_date=earliest,
        # max_date_allowed / end_date deliberately unset; chosen in the UI
    )
    return html.Div([text_box, html.P(" "), picker])
def make_item(button, cardbody, i):
    """Build one accordion item (header button "group-{i}-toggle" plus
    collapsible body "collapse-{i}") to avoid code duplication."""
    toggle = dbc.Button(button, color="link", id=f"group-{i}-toggle")
    header = dbc.CardHeader(html.H2(toggle))
    collapse = dbc.Collapse(dbc.CardBody(cardbody), id=f"collapse-{i}")
    return dbc.Card([header, collapse])

Reddit_data.py

import pandas as pd
import dateutil.relativedelta
from datetime import date
import datetime
import yfinance as yf
import numpy as np
import praw
import sqlite3
from config import r_cid, r_csec, r_uag#return a dataframe for the newest reddit posts
def get_reddit(cid= r_cid, csec= r_csec, uag= r_uag, subreddit='wallstreetbets'):
    """Return a DataFrame (title/score/post) of the newest posts in
    *subreddit*; credentials default to the values from config."""
    api = praw.Reddit(client_id=cid, client_secret=csec, user_agent=uag)
    newest = api.subreddit(subreddit).new(limit=None)
    rows = [[post.title, post.score, post.selftext] for post in newest]
    return pd.DataFrame(rows, columns=['title', 'score', 'post'])

tweet_data.py

import tweepy
import pandas as pd
import sqlite3
import json
import datetime
from datetime import date
import texthero as hero
import regex as re
import string
from config import t_conkey, t_consec, t_akey, t_asec

# show full tweet text in any printed frames
pd.set_option('display.max_colwidth', None)


def get_all_tweets(screen_name
    , consumer_key=t_conkey
    , consumer_secret=t_consec
    , access_key=t_akey
    , access_secret=t_asec
    ):
    """Download the most recent tweets for *screen_name* (Twitter caps
    this method at roughly the last 3240 tweets).

    Returns a DataFrame with columns time (id string), datetime, text.
    """
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # all fetched tweepy Tweet objects
    alltweets = []

    # initial request; 200 is the maximum allowed per call
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    alltweets.extend(new_tweets)

    # Fix: the original indexed alltweets[-1] unconditionally, raising
    # IndexError for an account with zero tweets.
    if not alltweets:
        return pd.DataFrame(columns=['time', 'datetime', 'text'])

    # id of the oldest tweet seen, minus one — pages backwards via max_id
    oldest = alltweets[-1].id - 1
    while len(new_tweets) > 0:
        # max_id prevents duplicates on subsequent requests
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        alltweets.extend(new_tweets)
        oldest = alltweets[-1].id - 1

    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    tweets_df = pd.DataFrame(outtweets, columns=['time', 'datetime', 'text'])
    return tweets_df


def get_options_flow():
    """Pull order-flow tweets from SwaggyStocks and unusual_whales, clean
    the text, and overwrite the `tweets` table in stocks.sqlite."""
    ss = get_all_tweets(screen_name="SwaggyStocks")
    uw = get_all_tweets(screen_name="unusual_whales")

    ss['source'] = 'swaggyStocks'
    ss['text'] = hero.remove_urls(ss['text'])
    ss['text'] = [n.replace('$', '') for n in ss['text']]

    uw['source'] = 'unusual_whales'
    uw['text'] = hero.remove_urls(uw['text'])
    uw['text'] = [n.replace('$', '') for n in uw['text']]
    uw['text'] = [n.replace(':', '') for n in uw['text']]
    uw['text'] = [n.replace('\n', ' ') for n in uw['text']]
    # NOTE(review): replaces a space with a space — the article formatting
    # likely collapsed a double space; confirm the intended pattern.
    uw['text'] = [n.replace(' ', ' ') for n in uw['text']]

    tweets = pd.concat([ss, uw])
    # Fix: close the sqlite connection instead of leaking it.
    conn = sqlite3.connect('stocks.sqlite')
    try:
        tweets.to_sql('tweets', conn, if_exists='replace')
    finally:
        conn.close()
    print('done')

get_fin_report.py

import pandas as pd
from bs4 import BeautifulSoup
import requests
from datetime import date
def get_financial_report(ticker):
    """Scrape MarketWatch income-statement and balance-sheet rows for
    *ticker* and return them as a DataFrame indexed by the last 5 years."""
    base = 'https://www.marketwatch.com/investing/stock/' + ticker
    text_soup_financials = BeautifulSoup(requests.get(base + '/financials').text, "html")
    text_soup_balancesheet = BeautifulSoup(requests.get(base + '/financials/balance-sheet').text, "html")

    def _row_values(title):
        # every non-empty value cell to the right of a labelled row title
        return [td.text for td in title.findNextSiblings(attrs={'class': 'valueCell'}) if td.text]

    # income-statement rows, keyed by the row label on the page
    epslist = []
    netincomelist = []
    longtermdebtlist = []
    interestexpenselist = []
    ebitdalist = []
    for title in text_soup_financials.findAll('td', {'class': 'rowTitle'}):
        label = title.text
        if 'EPS (Basic)' in label:
            epslist.append(_row_values(title))
        if 'Net Income' in label:
            netincomelist.append(_row_values(title))
        if 'Interest Expense' in label:
            interestexpenselist.append(_row_values(title))
        if 'EBITDA' in label:
            ebitdalist.append(_row_values(title))

    # balance-sheet rows
    equitylist = []
    for title in text_soup_balancesheet.findAll('td', {'class': 'rowTitle'}):
        label = title.text
        if "Total Shareholders' Equity" in label:
            equitylist.append(_row_values(title))
        if 'Long-Term Debt' in label:
            longtermdebtlist.append(_row_values(title))

    # get_element returns '-' for rows the page did not have
    this_year = date.today().year
    fin_df = pd.DataFrame(
        {'eps': get_element(epslist, 0),
         'eps Growth': get_element(epslist, 1),
         'net Income': get_element(netincomelist, 0),
         'shareholder Equity': get_element(equitylist, 0),
         'roa': get_element(equitylist, 1),
         'longterm Debt': get_element(longtermdebtlist, 0),
         'interest Expense': get_element(interestexpenselist, 0),
         'ebitda': get_element(ebitdalist, 0)},
        index=range(this_year - 5, this_year))
    fin_df.reset_index(inplace=True)
    return fin_df
def get_element(list, element):
    """Return row *element* from the scraped rows *list*, or '-' when the
    row is missing. (`list` shadows the builtin; kept for compatibility.)"""
    try:
        return list[element]
    except (IndexError, TypeError):
        # Narrowed from a bare `except:` that also swallowed
        # KeyboardInterrupt/SystemExit.
        return '-'

index.py

import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
import dash_table
from dash.exceptions import PreventUpdate
import flask
from flask import Flask
import pandas as pd
import dateutil.relativedelta
from datetime import date
import datetime
import yfinance as yf
import numpy as np
import praw
import sqlite3
import plotly
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from dash_utils import make_table, make_card, ticker_inputs, make_item
from reddit_data import get_reddit
from tweet_data import get_options_flow
from fin_report_data import get_financial_report #, get_financial_reportformatted
# ---- app bootstrap (de-fused from the article's run-together lines) ----
conn = sqlite3.connect('stocks.sqlite')
server = Flask(__name__)
app = dash.Dash(
    __name__,
    server=server,
    meta_tags=[{"content": "width=device-width"}],
    external_stylesheets=[dbc.themes.BOOTSTRAP],
)
# layout components are created by callbacks, so suppress missing-id errors
app.config.suppress_callback_exceptions = True

# refresh the tweets table from twitter, then read it for the initial layout
get_options_flow()
flow = pd.read_sql("select datetime, text from tweets order by datetime desc", conn)

# initial reddit posts (`global` at module scope is a no-op; kept for parity)
global dfr
dfr = get_reddit()

# row 1: ticker entry
_ticker_row = dbc.Row([
    dbc.Col(make_card("Enter Ticker", "success",
                      ticker_inputs('ticker-input', 'date-picker', 36)))
])

# row 2: twitter order flow + financial report table
_tables_row = dbc.Row([
    dbc.Col([make_card("Twitter Order Flow", 'primary',
                       make_table('table-sorting-filtering2', flow, '17px', 10))]),
    dbc.Col([make_card("Fin table ", "secondary", html.Div(id="fin-table"))]),
])

# row 3: placeholder cards, replaced by the 'cards' callback
_cards_row = dbc.Row([make_card("select ticker", "warning", "select ticker")],
                     id='cards')

# row 4, left: wallstreetbets posts with a manual refresh button
_reddit_col = dbc.Col([
    dbc.Row([make_card("Wallstreet Bets New Posts", 'primary',
                       [html.P(html.Button('Refresh', id='refresh')),
                        make_table('table-sorting-filtering', dfr, '17px', 4)])],
            justify='center')
])

# row 4, right: charts, two refresh intervals, and the tweet-reload note
_charts_col = dbc.Col([
    dbc.Row([dbc.Alert("__Charts__", color="primary")], justify='center'),
    dbc.Row(html.Div(id='x-vol-1'), justify='center'),
    dcc.Interval(id='interval-component', interval=1 * 150000, n_intervals=0),   # ms
    dcc.Interval(id='interval-component2', interval=1 * 60000, n_intervals=0),   # ms
    dbc.Row([html.Div(id='tweets')]),
])

layout1 = html.Div([
    _ticker_row,
    _tables_row,
    _cards_row,
    dbc.Row([_reddit_col, _charts_col]),
])
app.layout = layout1

# Operator tokens recognized in dash DataTable filter_query strings,
# parsed by split_filter_part below. (De-fused from the article's
# run-together line.)
operators = [['ge ', '>='],
             ['le ', '<='],
             ['lt ', '<'],
             ['gt ', '>'],
             ['ne ', '!='],
             ['eq ', '='],
             ['contains '],
             ['datestartswith ']]


def split_filter_part(filter_part):
    """Parse one clause of a DataTable filter_query into
    (column_name, operator, value); returns [None, None, None] when no
    known operator is present.
    """
    for operator_type in operators:
        for operator in operator_type:
            if operator in filter_part:
                name_part, value_part = filter_part.split(operator, 1)
                # column name sits between the braces: {col} op value
                name = name_part[name_part.find('{') + 1: name_part.rfind('}')]
                value_part = value_part.strip()
                v0 = value_part[0]
                if v0 == value_part[-1] and v0 in ("'", '"', '`'):
                    # quoted string value: strip quotes, unescape
                    value = value_part[1: -1].replace('\\' + v0, v0)
                else:
                    try:
                        value = float(value_part)
                    except ValueError:
                        value = value_part
                # word operators need spaces after them in the filter
                # string, but we don't want these later
                return name, operator_type[0].strip(), value
    return [None] * 3


@app.callback(Output('cards', 'children'),
              [Input('ticker-input', 'value')])
def refresh_cards(ticker):
    """Rebuild the company-info cards whenever the ticker input changes.

    Fixes two bugs: the original called ticker.upper() *before* the None
    check (AttributeError when the callback first fires with value=None),
    and its None branch set TICKER to the string 'MSFT', which has no
    .info attribute.
    """
    if ticker is None:
        ticker = 'MSFT'  # sensible default before any user input
    TICKER = yf.Ticker(ticker.upper())
    info = TICKER.info  # fetch once

    cards = [
        dbc.Col(make_card("Previous Close ", "secondary", info['previousClose'])),
        dbc.Col(make_card("Open", "secondary", info['open'])),
        dbc.Col(make_card("Sector", 'secondary', info['sector'])),
        dbc.Col(make_card("Beta", 'secondary', info['beta'])),
        dbc.Col(make_card("50d Avg Price", 'secondary', info['fiftyDayAverage'])),
        dbc.Col(make_card("Avg 10d Vol", 'secondary', info['averageVolume10days'])),
    ]  # end cards list
    return cards
@app.callback(
    [Output(f"collapse-{i}", "is_open") for i in range(1, 4)],
    [Input(f"group-{i}-toggle", "n_clicks") for i in range(1, 4)],
    [State(f"collapse-{i}", "is_open") for i in range(1, 4)],
)
def toggle_accordion(n1, n2, n3, is_open1, is_open2, is_open3):
    """Open the clicked accordion section and close the other two."""
    ctx = dash.callback_context
    if not ctx.triggered:
        # Fix: the original returned "" here, but this callback declares
        # three outputs, so Dash requires a 3-tuple.
        return False, False, False
    button_id = ctx.triggered[0]["prop_id"].split(".")[0]
    if button_id == "group-1-toggle" and n1:
        return not is_open1, False, False
    if button_id == "group-2-toggle" and n2:
        return False, not is_open2, False
    if button_id == "group-3-toggle" and n3:
        return False, False, not is_open3
    return False, False, False
@app.callback(Output('x-vol-1', 'children'),
              [Input('ticker-input', 'value'),
               Input('date-picker', 'start_date'),
               Input('date-picker', 'end_date'),
               Input('interval-component', 'n_intervals')])
def create_graph(ticker, startdate, enddate, n):
    """Build the three-candlestick-chart accordion for *ticker*.

    Fix: the callback fires with value=None before any user input; the
    original crashed there on ticker.upper().
    """
    if not ticker:
        raise PreventUpdate
    ticker = ticker.upper()

    def _candles(frame, xcol):
        # one candlestick figure from a yfinance OHLC frame
        return go.Figure(data=[go.Candlestick(
            x=frame[xcol],
            open=frame['Open'], high=frame['High'],
            low=frame['Low'], close=frame['Close'])])

    # daily bars over the picked date range
    df1 = yf.download(ticker, startdate, enddate)
    df1.reset_index(inplace=True)
    fig1 = _candles(df1, 'Date')

    # NOTE(review): the accordion label below says "5m" but this download
    # uses a 1-minute interval — confirm which was intended.
    df2 = yf.download(ticker, period="5d", interval="1m")
    df2.reset_index(inplace=True)
    fig2 = _candles(df2, 'Datetime')

    df3 = yf.download(ticker, period="1d", interval="1m")
    df3.reset_index(inplace=True)
    fig3 = _candles(df3, 'Datetime')

    accordion = html.Div([make_item("Daily Chart", dcc.Graph(figure=fig1), 1),
                          make_item("5d 5m Chart", dcc.Graph(figure=fig2), 2),
                          make_item("1d 1m Chart", dcc.Graph(figure=fig3), 3)],
                         className="accordion")
    return accordion
@app.callback(Output('tweets', 'children'),
              [Input('interval-component2', 'n_intervals')])
def new_tweets(n):
    """Re-pull twitter order flow into sqlite on every interval tick; the
    returned paragraph doubles as a visible reload counter."""
    get_options_flow()
    return html.P(f"Reloaded Tweets {n}")
@app.callback(
    Output('table-sorting-filtering', 'data'),
    [Input('table-sorting-filtering', "page_current"),
     Input('table-sorting-filtering', "page_size"),
     Input('table-sorting-filtering', 'sort_by'),
     Input('table-sorting-filtering', 'filter_query'),
     Input('refresh', 'n_clicks')])
def update_table(page_current, page_size, sort_by, filter, n_clicks):
    """Server-side paging/sorting/filtering for the reddit table.

    Fix: raise PreventUpdate *before* doing any work — the original split
    the filter string first.
    """
    if n_clicks is None:
        raise PreventUpdate

    dff = get_reddit()
    for filter_part in filter.split(' && '):
        col_name, operator, filter_value = split_filter_part(filter_part)
        if operator in ('eq', 'ne', 'lt', 'le', 'gt', 'ge'):
            # these operators match pandas Series comparison method names
            dff = dff.loc[getattr(dff[col_name], operator)(filter_value)]
        elif operator == 'contains':
            dff = dff.loc[dff[col_name].str.contains(filter_value)]
        elif operator == 'datestartswith':
            # simplification of the front-end logic: only works with
            # complete fields in standard format
            dff = dff.loc[dff[col_name].str.startswith(filter_value)]

    if len(sort_by):
        dff = dff.sort_values(
            [col['column_id'] for col in sort_by],
            ascending=[col['direction'] == 'asc' for col in sort_by],
            inplace=False)

    start = page_current * page_size
    return dff.iloc[start:start + page_size].to_dict('records')
@app.callback(
    Output('table-sorting-filtering2', 'data'),
    [Input('table-sorting-filtering2', "page_current"),
     Input('table-sorting-filtering2', "page_size"),
     Input('table-sorting-filtering2', 'sort_by'),
     Input('table-sorting-filtering2', 'filter_query'),
     Input('interval-component', 'n_intervals')])
def update_table2(page_current, page_size, sort_by, filter, n):
    """Server-side paging/sorting/filtering for the twitter-flow table,
    re-read from sqlite on every interval tick.

    Fix: close the per-call sqlite connection — the original leaked one
    connection on every tick.
    """
    conn = sqlite3.connect('stocks.sqlite')
    try:
        dff = pd.read_sql(
            "select datetime, text, source from tweets order by datetime desc",
            conn)
    finally:
        conn.close()

    for filter_part in filter.split(' && '):
        col_name, operator, filter_value = split_filter_part(filter_part)
        if operator in ('eq', 'ne', 'lt', 'le', 'gt', 'ge'):
            # these operators match pandas Series comparison method names
            dff = dff.loc[getattr(dff[col_name], operator)(filter_value)]
        elif operator == 'contains':
            dff = dff.loc[dff[col_name].str.contains(filter_value)]
        elif operator == 'datestartswith':
            # simplification of the front-end logic: only works with
            # complete fields in standard format
            dff = dff.loc[dff[col_name].str.startswith(filter_value)]

    if len(sort_by):
        dff = dff.sort_values(
            [col['column_id'] for col in sort_by],
            ascending=[col['direction'] == 'asc' for col in sort_by],
            inplace=False)

    start = page_current * page_size
    return dff.iloc[start:start + page_size].to_dict('records')
@app.callback(Output('fin-table', 'children'),
              [Input('ticker-input', 'value')])
def fin_report(sym):
    """Render the MarketWatch financials for *sym* as a Bootstrap table.

    Fix: the callback fires once with value=None on initial load; the
    original crashed there on sym.upper(). (Also de-fuses the article's
    run-together `return table` / `if __name__` line.)
    """
    if not sym:
        raise PreventUpdate
    df = get_financial_report(sym.upper())
    # table = make_table('table-sorting-filtering3', df, '20px', 8)
    table = dbc.Table.from_dataframe(df, striped=True, bordered=True, hover=True)
    return table


if __name__ == '__main__':
    app.run_server()

The Startup

Medium's largest active publication, followed by +720K people. Follow to join our community.

Eric Kleppen

Written by

Software Product Analyst in Data Science. pythondashboards.com Top writer in Business www.linkedin.com/in/erickleppen01/

The Startup

Medium's largest active publication, followed by +720K people. Follow to join our community.

Eric Kleppen

Written by

Software Product Analyst in Data Science. pythondashboards.com Top writer in Business www.linkedin.com/in/erickleppen01/

The Startup

Medium's largest active publication, followed by +720K people. Follow to join our community.

Medium is an open platform where 170 million readers come to find insightful and dynamic thinking. Here, expert and undiscovered voices alike dive into the heart of any topic and bring new ideas to the surface. Learn more

Follow the writers, publications, and topics that matter to you, and you’ll see them on your homepage and in your inbox. Explore

If you have a story to tell, knowledge to share, or a perspective to offer — welcome home. It’s easy and free to post your thinking on any topic. Write on Medium

Get the Medium app

A button that says 'Download on the App Store', and if clicked it will lead you to the iOS App store
A button that says 'Get it on, Google Play', and if clicked it will lead you to the Google Play store