Commit f9eab93
Parent(s): 98b8ce5
Transfer changes from neuse
- evaluate_demo.py +0 -1
- neus_v/veval/eval.py +3 -3
- vllm_serve.sh +2 -2
evaluate_demo.py
CHANGED
@@ -22,7 +22,6 @@ WEIGHT_PATH = Path("./assets/")
 pickle_path = WEIGHT_PATH / "distributions.pkl"
 num_of_frame_in_sequence = 3
 model = "InternVL2-8B"
-device = 0
 # Load the vision-language model
 vision_language_model = VLLMClient(api_base="http://localhost:8000/v1", model="OpenGVLab/InternVL2_5-8B")
 # Load distributions
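The dropped device = 0 reflects that the demo no longer runs the model locally: inference goes over HTTP to the vLLM server started by vllm_serve.sh. The repo's VLLMClient wrapper is not part of this diff, but since vLLM exposes an OpenAI-compatible chat API, an equivalent request would look roughly like the sketch below (the openai package, frame path, and prompt are placeholder assumptions, not code from the repo):

import base64
from openai import OpenAI

# Sketch only: VLLMClient is assumed to wrap an OpenAI-compatible call like this one.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")  # vLLM ignores the key

with open("frame_000.jpg", "rb") as f:  # placeholder frame path
    frame_b64 = base64.b64encode(f.read()).decode()

response = client.chat.completions.create(
    model="OpenGVLab/InternVL2_5-8B",
    messages=[{
        "role": "user",
        "content": [
            {"type": "text", "text": "Is there a car in this frame? Answer yes or no."},
            {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{frame_b64}"}},
        ],
    }],
)
print(response.choices[0].message.content)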
neus_v/veval/eval.py
CHANGED
@@ -226,17 +226,17 @@ def evaluate_video_with_sequence_of_images(
         for proposition, detected_object in object_of_interest.items():
             proposition_probability_record[proposition].append(detected_object.probability)
 
-    video_automaton.add_terminal_state(add_with_terminal_label=
+    video_automaton.add_terminal_state(add_with_terminal_label=False)
     sys.stdout.write("\n")  # Move to the next line after processing all frames
     result = model_checker.check_automaton(
         states=video_automaton.states,
         transitions=video_automaton.transitions,
         model_type="dtmc",
-        use_filter=
+        use_filter=False,
     )
     output_log["specification"] = tl_spec
     output_log["propositions"] = proposition_set
-    output_log["probability"] = round(float(str(result)), 6)
+    output_log["probability"] = round(float(str(result.at(0))), 6)
     output_log["min_probability"] = round(float(str(result.min)), 6)
     output_log["max_probability"] = round(float(str(result.max)), 6)
     for (
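The probability is now read per state with result.at(0) (the automaton's initial state) instead of parsing str(result), alongside the min/max extremes already logged. Assuming the checker wraps Storm via stormpy (check_automaton itself is not shown in this diff), a minimal stand-alone illustration of reading a quantitative result at the initial state, using a toy model rather than the video automaton:

import stormpy
import stormpy.examples
import stormpy.examples.files

# Toy DTMC (Knuth-Yao die) shipped with stormpy, standing in for the video automaton.
program = stormpy.parse_prism_program(stormpy.examples.files.prism_dtmc_die)
properties = stormpy.parse_properties("P=? [F s=7 & d=2]", program)
model = stormpy.build_model(program, properties)

result = stormpy.model_checking(model, properties[0])

# The quantitative result holds one value per state; index it at the initial
# state, which is what result.at(0) does in the updated eval.py.
initial_state = model.initial_states[0]
print(round(result.at(initial_state), 6))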
vllm_serve.sh
CHANGED
@@ -10,5 +10,5 @@ vllm serve $MODEL \
     --port $PORT \
     --trust-remote-code \
     --limit-mm-per-prompt image=4 \
-    --enforce-eager \
-    --max-model-len 16384
+    # --enforce-eager \
+    # --max-model-len 16384
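Commenting out --enforce-eager lets vLLM capture CUDA graphs again, and dropping the explicit --max-model-len means the server falls back to the context length in the model's own config. A quick way to confirm what the demo will actually reach on port 8000, assuming the requests package (recent vLLM builds also report max_model_len in each model card; older ones may omit it):

import requests

# Probe the OpenAI-compatible endpoint started by vllm_serve.sh.
resp = requests.get("http://localhost:8000/v1/models", timeout=5)
resp.raise_for_status()
for card in resp.json()["data"]:
    # "max_model_len" may not be present in every vLLM release; fall back gracefully.
    print(card["id"], card.get("max_model_len", "not reported"))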