Spaces: Runtime error

Commit · 81f39a3
Upload app.py
Parent(s): 37ca947

app.py CHANGED
@@ -24,6 +24,9 @@ from matplotlib.colors import LightSource
 from tensorflow import keras
 import pandas as pd
 
+from transformers import pipeline
+import transformers
+
 
 # Farben definieren
 cb = [15/255, 25/255, 35/255]
@@ -32,7 +35,13 @@ w = [242/255, 242/255, 242/255]
 blue = [68/255, 114/255, 196/255]
 orange = [197/255, 90/255, 17/255]
 
-
+
+# Pipelines definieren
+en_de_translator = pipeline("translation_de_to_en", model='google/bert2bert_L-24_wmt_de_en')
+
+
+tab1, tab2, tab3, tab4 = st.tabs(
+    ["Künstliche Neuronale Netze", "Wortvektoren Stimmung", "Wörter Maskieren", "Demos"])
 
 
 with tab1:
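The hunk above builds the Hugging Face translation pipeline at import time and adds a fourth tab ("Demos"). A minimal alternative sketch, assuming Streamlit >= 1.18 so that st.cache_resource is available (the helper name get_translator is illustrative, not part of the commit), loads the model once per server process instead of on every rerun:

import streamlit as st
from transformers import pipeline

@st.cache_resource
def get_translator():
    # Load the German-to-English seq2seq model once and reuse it across reruns.
    return pipeline("translation_de_to_en", model="google/bert2bert_L-24_wmt_de_en")

tab1, tab2, tab3, tab4 = st.tabs(
    ["Künstliche Neuronale Netze", "Wortvektoren Stimmung", "Wörter Maskieren", "Demos"])

Caching the pipeline can matter in a Space, since pipeline() downloads and deserializes the full model and would otherwise slow every cold start.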
@@ -60,7 +69,7 @@ with tab1:
     price = edited_data[:, 2]
 
     string = col2.text_area(
-        'Architektur des neuronalen Netzes. Anzahl der Neuronen in den
+        'Architektur des neuronalen Netzes. Anzahl der Neuronen in den verdeckten Schichten', value='4', height=275)
     layers = string.split('\n')
 
     if st.button('Modell trainieren und Fit-Kurve darstellen'):
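The changed text_area label now reads "Architecture of the neural network. Number of neurons in the hidden layers" with a default of '4', and the entry is split on newlines into layers. The model-building code itself is outside this hunk, so the following is only a hypothetical sketch of how such a newline-separated spec could be turned into a Keras model (the helper name build_model and the input/output shapes are assumptions):

import tensorflow as tf

def build_model(layer_spec: str) -> tf.keras.Model:
    # One Dense layer per non-empty line; each line holds a neuron count.
    model = tf.keras.models.Sequential()
    model.add(tf.keras.Input(shape=(1,)))              # assumed: one input feature
    for line in layer_spec.split('\n'):
        if line.strip():
            model.add(tf.keras.layers.Dense(int(line), activation='relu'))
    model.add(tf.keras.layers.Dense(1))                # assumed: one regression output
    return model

model = build_model('4')                               # default value of the text area
model.summary()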
@@ -140,6 +149,111 @@ with tab1:
 
 # %%
 with tab2:
+    text_input_2 = '1: Das schöne Allgäu\n' + \
+        '1: So toll hier im Allgäu\n' + \
+        '1: Uns gefallen die Berge und Seen\n' + \
+        '1: Wir mögen die Landschaft und die Berge\n' + \
+        '1: Ganz toll im Allgäu\n' + \
+        '1: Wir mögen das Allgäu\n' + \
+        '0: Uns gefiel es leider nicht\n' + \
+        '0: Bei Regen ist es total langweilig\n' + \
+        '0: Ganz langweilig!\n' + \
+        '0: So schade, dass es oft Regen gibt\n' + \
+        '0: Sehr schade, wir konnten gar nicht skifahren\n' + \
+        '0: Das gefiel uns überhaupt nicht'
+
+    string_2 = st.text_area('', value=text_input_2, height=275)
+    texts_2 = string_2.split('\n')
+
+    text = []
+    labels = []
+    for element in texts_2:
+        if element != '':
+            label_element, text_element = element.split(':')
+            text.append(text_element)
+            labels.append(float(label_element))
+
+    if st.button('Modell trainieren und Wort-Vektoren darstellen', key=1):
+        with st.spinner('Der Fit-Prozess kann einige Sekunden dauern ...'):
+
+            vectorizer = tf.keras.layers.TextVectorization(
+                max_tokens=1000, output_sequence_length=7)
+
+            vectorizer.adapt(text)
+
+            model = tf.keras.models.Sequential()
+            model.add(vectorizer)
+
+            model.add(tf.keras.layers.Embedding(vectorizer.vocabulary_size(), 2))
+            # model.add(tf.keras.layers.Dropout(0.6))
+            model.add(tf.keras.layers.LSTM(1, return_sequences=False, activation='sigmoid'))
+            # model.add(tf.keras.layers.Flatten())
+            # model.add(tf.keras.layers.Dense(1, activation='sigmoid', use_bias=False, trainable=True))
+
+            model.summary()
+
+            model.compile(optimizer='adam', loss='binary_crossentropy',
+                          metrics=['accuracy'])
+
+            model.fit(text, labels, epochs=2000, verbose=2)
+
+            # Word Vektoren grafisch darstellen
+            cb = [15/255, 25/255, 35/255]
+            cf = [25/255*2, 35/255*2, 45/255*2]
+            w = [242/255, 242/255, 242/255]
+            blue = [68/255, 114/255, 196/255]
+            orange = [197/255, 90/255, 17/255]
+
+            fig = plt.figure(facecolor=cb, figsize=(7, 7))
+            ax = fig.add_subplot()
+            ax.tick_params(color=w, labelcolor=w, labelsize=12)
+            ax.set_facecolor(cb)
+
+            y_pred = model.predict(np.array(vectorizer.get_vocabulary(
+                include_special_tokens=False)).reshape(-1, 1))
+
+            embed_model = tf.keras.models.Model(model.input, model.layers[1].output)
+            X_embed = embed_model(np.array(vectorizer.get_vocabulary(
+                include_special_tokens=False)).reshape(-1, 1))[:, 0, :]
+
+            # 1. Dimension der Wort-Vektoren auf X-Achse,
+            # 2. Dimension auf y-Achse, 3. auf die Z-Achse abbilden
+            ax.scatter(X_embed[:, 0], X_embed[:, 1],
+                       c=y_pred, cmap='coolwarm')
+            for i in range(vectorizer.vocabulary_size()-2):
+                ax.text(X_embed[i, 0], X_embed[i, 1],
+                        vectorizer.get_vocabulary(include_special_tokens=False)[i],
+                        color=w)
+
+            ax.set_ylim(-2, 2)
+            ax.set_xlim(-2, 2)
+
+            ax.set_xticks([-2, -1, 0, 1, 2])
+            ax.set_yticks([-2, -1, 0, 1, 2])
+
+            ax.spines['bottom'].set_color(w)
+            ax.spines['top'].set_color(w)
+            ax.spines['right'].set_color(w)
+            ax.spines['left'].set_color(w)
+
+            ax.set_xlabel('Dimension 1', color=w, fontsize=15, labelpad=10)
+            ax.set_ylabel('Dimension 2', color=w, fontsize=15, labelpad=10)
+
+            # get the mappable, the 1st and the 2nd are the x and y axes
+
+            PCM = ax.get_children()[0]
+            cbar = plt.colorbar(PCM, ax=ax, fraction=0.036, pad=0.090)
+            cbar.set_ticks([])
+
+            cbar.set_label(
+                '<- positiv Stimmung negativ ->', fontsize=12, color=w, rotation=270, labelpad=12)
+
+            ax.set_title('Epoche 2000', color=w, fontsize=15)
+            st.pyplot(fig)
+
+
+# %%
+with tab3:
 
     text_input = 'Das schöne Allgäu\n' + \
         'Das wunderbare Allgäu\n' + \
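The new tab2 block labels each example sentence with a leading '1:' (positive) or '0:' (negative), trains a TextVectorization -> 2-D Embedding -> LSTM(1, sigmoid) classifier for 2000 epochs, and then plots one 2-D vector per vocabulary word ("Modell trainieren und Wort-Vektoren darstellen" = "train model and display word vectors"). A condensed, self-contained sketch of the same technique, assuming TensorFlow 2.x; reading the embedding weights directly via get_weights() is an alternative to the sub-model used in the diff, and the toy corpus below is not from the commit:

import numpy as np
import tensorflow as tf

texts = ['so toll hier im allgäu', 'ganz langweilig bei regen']   # toy corpus
labels = np.array([1.0, 0.0])                                     # 1 = positive, 0 = negative

vectorizer = tf.keras.layers.TextVectorization(max_tokens=100, output_sequence_length=7)
vectorizer.adapt(texts)

model = tf.keras.models.Sequential([
    vectorizer,
    tf.keras.layers.Embedding(vectorizer.vocabulary_size(), 2),   # 2-D word vectors
    tf.keras.layers.LSTM(1, activation='sigmoid'),
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(np.array(texts), labels, epochs=50, verbose=0)

# One 2-D vector per vocabulary entry (rows 0 and 1 are the padding and OOV tokens).
word_vectors = model.layers[1].get_weights()[0]
for word, vec in zip(vectorizer.get_vocabulary(), word_vectors):
    print(word, vec)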
@@ -157,7 +271,7 @@ with tab2:
     string = st.text_area('', value=text_input, height=275)
     text = string.split('\n')
 
-    if st.button('Modell trainieren und Wort-Vektoren darstellen'):
+    if st.button('Modell trainieren und Wort-Vektoren darstellen', key=2):
        with st.spinner('Der Fit-Prozess kann einige Sekunden dauern ...'):
 
            vectorizer = tf.keras.layers.TextVectorization(
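The only change in this hunk is the added key=2. Streamlit derives a widget's identity from its type and parameters, so two st.button calls with the identical label in one script collide with a duplicate-widget error unless each gets a distinct key, as in this small sketch:

import streamlit as st

# Same label used twice is fine as long as the keys differ.
clicked_a = st.button('Modell trainieren und Wort-Vektoren darstellen', key=1)
clicked_b = st.button('Modell trainieren und Wort-Vektoren darstellen', key=2)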
@@ -239,5 +353,16 @@ with tab2:
 
 
 # %%
-with
-st.header("
+with tab4:
+    # st.header("Übersetzung: Deutsch --> Englisch")
+    #st.text("Übersetzung: Deutsch --> Englisch")
+
+    text_input_3 = 'Wir ünterstützen Unternehmen bei der Datenanalyse durch individuelle Beratung und Projekte mit besonderem Fokus auf maschinelles Lernen und Deep Learning.'
+
+    string_3 = st.text_area('Übersetzung: Deutsch --> Englisch', value=text_input_3, height=75)
+
+    if st.button('Ein fertig trainiertes Transformer-Modell von HuggingFace anwenden', key=3):
+        with st.spinner('Die Übersetzung kann einige Sekunden dauern ...'):
+
+            a5 = en_de_translator(string_3)
+            st.text(a5)
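The new tab4 ("Translation: German --> English") sends the input text through the en_de_translator pipeline defined at the top and prints the raw return value with st.text. Translation pipelines in transformers return a list of dicts, so a5 renders as something like [{'translation_text': '...'}]; a small sketch (model download assumed to succeed) that shows only the translated string:

from transformers import pipeline

translator = pipeline("translation_de_to_en", model="google/bert2bert_L-24_wmt_de_en")
result = translator("Das schöne Allgäu")        # -> [{'translation_text': '...'}]
print(result[0]['translation_text'])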