Samuel CHAINEAU committed on
Commit
bda55bf
1 Parent(s): 313066e

QBGPT V1.0

Browse files
Files changed (2) hide show
  1. app.py +9 -9
  2. pages.py +3 -3
app.py CHANGED
@@ -32,18 +32,18 @@ qbgpt = QBGPT(input_vocab_size = input_size,
32
  diag_masks = False,
33
  to_pred_size = moves_to_pred)
34
 
35
- qbgpt.load_weights("app/assets/model_mediumv2/QBGPT")
36
 
37
 
38
- qb_tok = tokenizer(moves_index="./app/assets/moves_index.parquet",
39
- play_index="./app/assets/plays_index.parquet",
40
- positions_index="./app/assets/positions_index.parquet",
41
- scrimmage_index="./app/assets/scrimmage_index.parquet",
42
- starts_index="./app/assets/starts_index.parquet",
43
- time_index="./app/assets/time_index.parquet",
44
  window_size=20)
45
 
46
- with open('./app/assets/ref.json', 'r') as fp:
47
  ref_json = json.load(fp)
48
 
49
  def convert_numpy(d):
@@ -51,7 +51,7 @@ def convert_numpy(d):
51
 
52
  ref_json = {int(k):convert_numpy(v) for k,v in ref_json.items()}
53
 
54
- ref_df = pd.read_json("./app/assets/ref_df.json")
55
 
56
 
57
 
 
32
  diag_masks = False,
33
  to_pred_size = moves_to_pred)
34
 
35
+ qbgpt.load_weights("assets/model_mediumv2/QBGPT")
36
 
37
 
38
+ qb_tok = tokenizer(moves_index="./assets/moves_index.parquet",
39
+ play_index="./assets/plays_index.parquet",
40
+ positions_index="./assets/positions_index.parquet",
41
+ scrimmage_index="./assets/scrimmage_index.parquet",
42
+ starts_index="./assets/starts_index.parquet",
43
+ time_index="./assets/time_index.parquet",
44
  window_size=20)
45
 
46
+ with open('./assets/ref.json', 'r') as fp:
47
  ref_json = json.load(fp)
48
 
49
  def convert_numpy(d):
 
51
 
52
  ref_json = {int(k):convert_numpy(v) for k,v in ref_json.items()}
53
 
54
+ ref_df = pd.read_json("./assets/ref_df.json")
55
 
56
 
57
 
pages.py CHANGED
@@ -63,7 +63,7 @@ def qb_gpt_page(ref_df, ref, tokenizer, model):
63
  plot_true = pd.DataFrame(step1_true)
64
 
65
  fig_gen = px.line(plot, x="input_ids_x", y="input_ids_y", animation_frame="pos_ids", color="OffDef", symbol="ids",
66
- text="position_ids", title="Player Trajectories Over Time", line_shape="linear",
67
  range_x=[0, 140], range_y=[0, 60], # Set X and Y axis ranges
68
  render_mode="svg") # Render mode for smoother lines
69
 
@@ -73,7 +73,7 @@ def qb_gpt_page(ref_df, ref, tokenizer, model):
73
  st.plotly_chart(fig_gen)
74
 
75
  fig_true = px.line(plot_true, x="input_ids_x", y="input_ids_y", animation_frame="pos_ids", color="OffDef", symbol="ids",
76
- text="position_ids", title="Player Trajectories Over Time",
77
  range_x=[0, 140], range_y=[0, 60], # Set X and Y axis ranges
78
  line_shape="linear", # Draw lines connecting points
79
  render_mode="svg") # Render mode for smoother lines
@@ -106,7 +106,7 @@ def contacts_and_disclaimers():
106
  qb_gpt_transf = """
107
  At the heart of QB-GPT lies the cutting-edge Transformer model, a deep learning architecture known for its prowess in understanding sequential data. It doesn't just create plays; it understands the game at a granular level, taking into account player positions, game situations, and historical data. It relies on the same conceptual approach behind the now famous "GPT" model of OpenAI. It's the playbook of the future, driven by the technology of tomorrow.
108
 
109
- A more detailed blogpost about the model QB-GPT can be found [here](link)
110
  """
111
  st.markdown(qb_gpt_transf)
112
 
 
63
  plot_true = pd.DataFrame(step1_true)
64
 
65
  fig_gen = px.line(plot, x="input_ids_x", y="input_ids_y", animation_frame="pos_ids", color="OffDef", symbol="ids",
66
+ text="position_ids", title="Generated players' trajectories Over Time", line_shape="linear",
67
  range_x=[0, 140], range_y=[0, 60], # Set X and Y axis ranges
68
  render_mode="svg") # Render mode for smoother lines
69
 
 
73
  st.plotly_chart(fig_gen)
74
 
75
  fig_true = px.line(plot_true, x="input_ids_x", y="input_ids_y", animation_frame="pos_ids", color="OffDef", symbol="ids",
76
+ text="position_ids", title="True players' trajectories Over Time",
77
  range_x=[0, 140], range_y=[0, 60], # Set X and Y axis ranges
78
  line_shape="linear", # Draw lines connecting points
79
  render_mode="svg") # Render mode for smoother lines
 
106
  qb_gpt_transf = """
107
  At the heart of QB-GPT lies the cutting-edge Transformer model, a deep learning architecture known for its prowess in understanding sequential data. It doesn't just create plays; it understands the game at a granular level, taking into account player positions, game situations, and historical data. It relies on the same conceptual approach behind the now famous "GPT" model of OpenAI. It's the playbook of the future, driven by the technology of tomorrow.
108
 
109
+ A more detailed blogpost about the model QB-GPT can be found [here](https://medium.com/@sam.chaineau/transformers-can-generate-nfl-plays-introducing-qb-gpt-2d40f16a03eb)
110
  """
111
  st.markdown(qb_gpt_transf)
112