Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -28,9 +28,9 @@ conda activate fmpose_3d
```bash
git clone xxxx.git # clone this repo
# TestPyPI (pre-release/testing build)
pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple/ fmpose==0.0.5
pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple/ fmpose3d==0.0.5
# Future Official PyPI release
# pip install fmpose
# pip install fmpose3d
```

## Demo
Expand Down
6 changes: 3 additions & 3 deletions animals/demo/vis_animals.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,8 @@
from PIL import Image
import matplotlib.gridspec as gridspec
import imageio
from fmpose.animals.common.arguments import opts as parse_args
from fmpose.common.camera import normalize_screen_coordinates, camera_to_world
from fmpose3d.animals.common.arguments import opts as parse_args
from fmpose3d.common.camera import normalize_screen_coordinates, camera_to_world

sys.path.append(os.getcwd())

Expand All @@ -37,7 +37,7 @@
CFM = getattr(module, "Model")
else:
# Load model from installed fmpose3d package
from fmpose.models import Model as CFM
from fmpose3d.models import Model as CFM

from deeplabcut.pose_estimation_pytorch.apis import superanimal_analyze_images

Expand Down
2 changes: 1 addition & 1 deletion animals/models/model_animals.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
from einops import rearrange
from timm.models.layers import DropPath

from fmpose.animals.models.graph_frames import Graph
from fmpose3d.animals.models.graph_frames import Graph


class TimeEmbedding(nn.Module):
Expand Down
8 changes: 4 additions & 4 deletions animals/scripts/main_animal3d.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,9 @@
import numpy as np
from tqdm import tqdm
import torch.optim as optim
from fmpose.animals.common.arguments import opts as parse_args
from fmpose.animals.common.utils import *
from fmpose.animals.common.animal3d_dataset import TrainDataset
from fmpose3d.animals.common.arguments import opts as parse_args
from fmpose3d.animals.common.utils import *
from fmpose3d.animals.common.animal3d_dataset import TrainDataset
import time

args = parse_args().parse()
Expand All @@ -30,7 +30,7 @@
CFM = getattr(module, "Model")
else:
# Load model from installed fmpose3d package
from fmpose.animals.models import Model as CFM
from fmpose3d.animals.models import Model as CFM

def train(opt, actions, train_loader, model, optimizer, epoch):
return step('train', opt, actions, train_loader, model, optimizer, epoch)
Expand Down
10 changes: 5 additions & 5 deletions demo/vis_in_the_wild.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,12 +10,12 @@
sys.path.append(os.getcwd())

# Auto-download checkpoint files if missing
from fmpose.lib.checkpoint.download_checkpoints import ensure_checkpoints
from fmpose3d.lib.checkpoint.download_checkpoints import ensure_checkpoints
ensure_checkpoints()

from fmpose.lib.preprocess import h36m_coco_format, revise_kpts
from fmpose.lib.hrnet.gen_kpts import gen_video_kpts as hrnet_pose
from fmpose.common.arguments import opts as parse_args
from fmpose3d.lib.preprocess import h36m_coco_format, revise_kpts
from fmpose3d.lib.hrnet.gen_kpts import gen_video_kpts as hrnet_pose
from fmpose3d.common.arguments import opts as parse_args

args = parse_args().parse()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
Expand All @@ -30,7 +30,7 @@
spec.loader.exec_module(module)
CFM = getattr(module, 'Model')

from fmpose.common.camera import *
from fmpose3d.common.camera import *

import matplotlib
import matplotlib.pyplot as plt
Expand Down
File renamed without changes.
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import torch
from fmpose.common.utils import project_to_2d
from fmpose3d.common.utils import project_to_2d

def average_aggregation(list_hypothesis):
return torch.mean(torch.stack(list_hypothesis), dim=0)
Expand Down
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
from einops import rearrange
from timm.models.layers import DropPath

from fmpose.animals.models.graph_frames import Graph
from fmpose3d.animals.models.graph_frames import Graph

class TimeEmbedding(nn.Module):
def __init__(self, dim: int, hidden_dim: int = 64):
Expand Down
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,9 @@

import numpy as np

from fmpose.common.camera import normalize_screen_coordinates
from fmpose.common.mocap_dataset import MocapDataset
from fmpose.common.skeleton import Skeleton
from fmpose3d.common.camera import normalize_screen_coordinates
from fmpose3d.common.mocap_dataset import MocapDataset
from fmpose3d.common.skeleton import Skeleton

h36m_skeleton = Skeleton(
parents=[
Expand Down
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
import numpy as np
import torch.utils.data as data

from fmpose.common.camera import normalize_screen_coordinates, world_to_camera
from fmpose.common.generator import ChunkedGenerator
from fmpose.common.utils import deterministic_random
from fmpose3d.common.camera import normalize_screen_coordinates, world_to_camera
from fmpose3d.common.generator import ChunkedGenerator
from fmpose3d.common.utils import deterministic_random


class Fusion(data.Dataset):
Expand Down
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
18 changes: 9 additions & 9 deletions fmpose/lib/hrnet/gen_kpts.py → fmpose3d/lib/hrnet/gen_kpts.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,22 +15,22 @@
import cv2
import copy

from fmpose.lib.hrnet.lib.utils.utilitys import plot_keypoint, PreProcess, write, load_json
from fmpose.lib.hrnet.lib.config import cfg, update_config
from fmpose.lib.hrnet.lib.utils.transforms import *
from fmpose.lib.hrnet.lib.utils.inference import get_final_preds
from fmpose.lib.hrnet.lib.models import pose_hrnet
from fmpose3d.lib.hrnet.lib.utils.utilitys import plot_keypoint, PreProcess, write, load_json
from fmpose3d.lib.hrnet.lib.config import cfg, update_config
from fmpose3d.lib.hrnet.lib.utils.transforms import *
from fmpose3d.lib.hrnet.lib.utils.inference import get_final_preds
from fmpose3d.lib.hrnet.lib.models import pose_hrnet

cfg_dir = osp.join(osp.dirname(osp.abspath(__file__)), 'experiments') + '/'

# Auto-download checkpoints if missing and get checkpoint paths
from fmpose.lib.checkpoint.download_checkpoints import ensure_checkpoints, get_checkpoint_path
from fmpose3d.lib.checkpoint.download_checkpoints import ensure_checkpoints, get_checkpoint_path
ensure_checkpoints()

# Loading human detector model
from fmpose.lib.yolov3.human_detector import load_model as yolo_model
from fmpose.lib.yolov3.human_detector import yolo_human_det as yolo_det
from fmpose.lib.sort.sort import Sort
from fmpose3d.lib.yolov3.human_detector import load_model as yolo_model
from fmpose3d.lib.yolov3.human_detector import yolo_human_det as yolo_det
from fmpose3d.lib.sort.sort import Sort

def parse_args():
parser = argparse.ArgumentParser(description='Train keypoints network')
Expand Down
File renamed without changes.
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,9 @@
import torch
import json
import torchvision.transforms as transforms
from fmpose.lib.hrnet.lib.utils.transforms import *
from fmpose3d.lib.hrnet.lib.utils.transforms import *

from fmpose.lib.hrnet.lib.utils.coco_h36m import coco_h36m
from fmpose3d.lib.hrnet.lib.utils.coco_h36m import coco_h36m
import numpy as np

joint_pairs = [[0, 1], [1, 3], [0, 2], [2, 4],
Expand Down
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,8 @@
import os
import sys

from fmpose.lib.yolov3.util import convert2cpu as cpu
from fmpose.lib.yolov3.util import predict_transform
from fmpose3d.lib.yolov3.util import convert2cpu as cpu
from fmpose3d.lib.yolov3.util import predict_transform


class test_net(nn.Module):
Expand Down
File renamed without changes.
File renamed without changes.
File renamed without changes.
Original file line number Diff line number Diff line change
Expand Up @@ -9,10 +9,10 @@
import pickle as pkl
import argparse

from fmpose.lib.yolov3.util import *
from fmpose.lib.yolov3.darknet import Darknet
from fmpose.lib.yolov3 import preprocess
from fmpose.lib.checkpoint.download_checkpoints import get_checkpoint_path
from fmpose3d.lib.yolov3.util import *
from fmpose3d.lib.yolov3.darknet import Darknet
from fmpose3d.lib.yolov3 import preprocess
from fmpose3d.lib.checkpoint.download_checkpoints import get_checkpoint_path

cur_dir = os.path.dirname(os.path.realpath(__file__))
project_root = os.path.join(cur_dir, '../../../')
Expand Down
File renamed without changes.
4 changes: 2 additions & 2 deletions fmpose/lib/yolov3/util.py → fmpose3d/lib/yolov3/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,8 @@
import numpy as np
import cv2
import os.path as osp
from fmpose.lib.yolov3.bbox import bbox_iou
from fmpose.lib.checkpoint.download_checkpoints import get_checkpoint_dir
from fmpose3d.lib.yolov3.bbox import bbox_iou
from fmpose3d.lib.checkpoint.download_checkpoints import get_checkpoint_dir


def get_path(cur_file):
Expand Down
File renamed without changes.
File renamed without changes.
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
import torch.nn as nn
import math
from einops import rearrange
from fmpose.models.graph_frames import Graph
from fmpose3d.models.graph_frames import Graph
from functools import partial
from einops import rearrange, repeat
from timm.models.layers import DropPath
Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ include = ["fmpose*"]
"*" = ["*.yaml", "*.cfg", "*.names"]

[tool.setuptools.dynamic]
version = {attr = "fmpose.__version__"}
version = {attr = "fmpose3d.__version__"}

[tool.black]
line-length = 100
Expand Down
8 changes: 4 additions & 4 deletions scripts/FMPose3D_main.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,10 +9,10 @@
import torch.optim as optim
from tqdm import tqdm

from fmpose.common import opts, Human36mDataset, Fusion
from fmpose.common.utils import *
from fmpose3d.common import opts, Human36mDataset, Fusion
from fmpose3d.common.utils import *

from fmpose.aggregation_methods import aggregation_RPEA_weighted_by_2D_error
from fmpose3d.aggregation_methods import aggregation_RPEA_weighted_by_2D_error

args = opts().parse()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
Expand All @@ -33,7 +33,7 @@
CFM = getattr(module, "Model")
else:
# Load model from installed fmpose3d package
from fmpose.models import Model as CFM
from fmpose3d.models import Model as CFM


def test_multi_hypothesis(
Expand Down
10 changes: 5 additions & 5 deletions tests/test_demo_human.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,8 @@ def test_output_dir(tmp_path):
def test_2d_pose_estimation(test_image_path, test_output_dir):
"""Test that 2D pose estimation runs and produces output."""
# Import here to avoid import issues at collection time
from fmpose.lib.hrnet.gen_kpts import gen_video_kpts as hrnet_pose
from fmpose.lib.preprocess import h36m_coco_format, revise_kpts
from fmpose3d.lib.hrnet.gen_kpts import gen_video_kpts as hrnet_pose
from fmpose3d.lib.preprocess import h36m_coco_format, revise_kpts

# Run 2D pose estimation
keypoints, scores = hrnet_pose(test_image_path, det_dim=416, num_peroson=1, gen_output=True, type='image')
Expand All @@ -52,9 +52,9 @@ def test_2d_pose_estimation(test_image_path, test_output_dir):
def test_demo_pipeline_runs(test_image_path):
"""Test that the full demo pipeline can be imported and key components work."""
# Test imports
from fmpose.lib.hrnet.gen_kpts import gen_video_kpts
from fmpose.lib.preprocess import h36m_coco_format, revise_kpts
from fmpose.models import Model
from fmpose3d.lib.hrnet.gen_kpts import gen_video_kpts
from fmpose3d.lib.preprocess import h36m_coco_format, revise_kpts
from fmpose3d.models import Model

assert gen_video_kpts is not None
assert h36m_coco_format is not None
Expand Down
2 changes: 1 addition & 1 deletion tests/test_model.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import pytest
import torch
from fmpose.models import Model
from fmpose3d.models import Model

class Args:
"""Mock args for model configuration."""
Expand Down
2 changes: 1 addition & 1 deletion tests/test_training_pipeline.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import pytest
import torch
import torch.optim as optim
from fmpose.models import Model
from fmpose3d.models import Model


class Args:
Expand Down