以下の内容はhttps://end0tknr.hateblo.jp/entry/20250713/1752406932より取得しました。


再 - CPUでの Floor-Plan-Detection の間取り図 Raster to Vector 変換

CPUでの Floor-Plan-Detection の間取り図 Raster to Vector 変換 - end0tknr's kipple - web写経開発

先日の上記entryでは、raster to vector後、画面表示するだけでしたので、 部屋と壁の座標をjsonで保存する様、save_vectors_coords() を追加しました。

import os
import sys
from model import get_model
from torch.utils.data import DataLoader
from utils.FloorplanToBlenderLib import *
from utils.loaders.augmentations import RotateNTurns
import config
import cv2
import json
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn.functional as F
import utils.plotting

from utils.post_prosessing import split_prediction, get_polygons
import utils.post_prosessing

# Wall height (model units) and scale factor used when converting wall
# polygons to 3D vertices in main().
wall_height = 1
scale       = 100
# Trained CubiCasa5k checkpoint; loaded onto CPU in main().
pkl_path = "model_best_val_loss_var.pkl"

# Segmentation label names; order must match the trained model's outputs.
# NOTE(review): "Applience" spelling is kept as-is — it is runtime label
# text shown in the colorbar, presumably matching the original repo.
room_classes=["Background","Outdoor","Wall","Kitchen","Living Room","Bed Room",
              "Bath","Entry","Railing","Storage","Garage","Undefined"]
icon_classes=["No Icon","Window","Door","Closet","Electrical Applience",
              "Toilet","Sink","Sauna Bench","Fire Place","Bathtub","Chimney"]


def main():
    """Run floor-plan raster-to-vector inference on a single image.

    Usage: python <script> <image_path>

    Loads the CubiCasa5k 'hg_furukawa_original' model on CPU, averages
    predictions over four rotations (test-time augmentation), extracts
    room/wall polygons, saves their coordinates via save_vectors_coords()
    and displays the room and icon segmentations.
    """
    # Image path comes from the command line (config.py also defines an
    # image_path, but it is not used here).
    img_path = sys.argv[1]

    # Build a discrete color map so each segmentation label gets its own color.
    utils.plotting.discrete_cmap()

    rot = RotateNTurns()

    # https://github.com/CubiCasa/CubiCasa5k/tree/master/floortrans/models
    model = get_model('hg_furukawa_original', 51)

    split     = [21, len( room_classes ), len( icon_classes )]
    n_classes = split[0] + split[1] + split[2]  # = 44
    # Replace the final output layer so it matches the 44 classes.
    model.conv4_ = torch.nn.Conv2d( 256, n_classes, bias=True, kernel_size=1 )
    # Upsampling layer so the output size matches the input image.
    model.upsample = torch.nn.ConvTranspose2d( n_classes,
                                               n_classes,
                                               kernel_size=4,
                                               stride=4 )

    checkpoint = torch.load( pkl_path, map_location='cpu' )  # CPU only
    model.load_state_dict( checkpoint['model_state'] )
    model.eval()        # switch to inference mode
    model.to('cpu')     # run on CPU

    img = cv2.imread(img_path)  # Create tensor for pytorch
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # correct color channels
    img = 2 * (img / 255.0) - 1  # scale pixel values to the range (-1, 1)

    # (h, w, 3) -> (3, h, w): the model expects channels first.
    img = np.moveaxis(img, -1, 0)

    # Add a batch dimension; kept on CPU (no .cuda()).
    img = torch.from_numpy(np.expand_dims(img.astype(np.float32), axis=0))

    with torch.no_grad():  # inference only: skip gradients to save memory
        # Trim one pixel from any odd dimension so rotated tensors line up.
        size_check = np.array([img.shape[2], img.shape[3]]) % 2
        height = img.shape[2] - size_check[0]
        width  = img.shape[3] - size_check[1]

        # (forward rotation, inverse rotation) pairs, in quarter turns.
        rotations = [ ( 0,  0),   # no rotation
                      ( 1, -1),   #  90 deg forward, -90 deg back
                      ( 2,  2),   # 180 deg forward, 180 deg back
                      (-1,  1) ]  # -90 deg forward, +90 deg back

        # Accumulated predictions, one slot per rotation, on CPU.
        prediction = torch.zeros([len(rotations), n_classes, height, width])

        for i, (forward, back) in enumerate( rotations ):
            rot_image = rot(img, 'tensor', forward)  # rotate the image
            pred = model(rot_image)                  # predict
            pred = rot(pred, 'tensor', back)         # rotate prediction back
            # Also restore heatmap point semantics (e.g. icon orientation).
            pred = rot(pred, 'points', back)

            # Resize the model output back to (height, width).
            pred = F.interpolate(pred,
                                 size=(height, width),
                                 mode='bilinear',
                                 align_corners=True)
            prediction[i] = pred[0]

    # Average the predictions over all rotations.
    prediction = torch.mean(prediction, 0, True)

    # One tensor holds heatmaps, rooms and icons; split it per `split`.
    heatmaps,rooms,icons = \
        utils.post_prosessing.split_prediction(prediction,(height,width),split)

    # get_polygons() was extended to also return wall_coords.
    polygons, types, room_polygons, room_types, wall_coords = \
        utils.post_prosessing.get_polygons( (heatmaps,rooms,icons), 0.2, [1,2] )
    save_vectors_coords( room_polygons, wall_coords )

    # Collect wall polygons for 3D conversion.
    wall_polygon_numbers = [i for i, j in enumerate(types) if j['type'] == 'wall']
    boxes = []
    for i, j in enumerate(polygons):
        if i in wall_polygon_numbers:
            boxes.append(np.array([np.array([k]) for k in j]))

    # NOTE(review): verts and faces are rebuilt from scratch right below,
    # so of this call only wall_amount would survive — and it is unused
    # too. Kept as in the original in case transform has side effects;
    # candidate for removal after confirming it is pure.
    verts, faces, wall_amount = transform.create_nx4_verts_and_faces(boxes,
                                                                     wall_height,
                                                                     scale)
    # Create top wall verts.
    verts = []
    for box in boxes:
        verts.extend([transform.scale_point_to_vector(box, scale, 0)])

    # Create faces.
    faces = []
    for room in verts:
        faces.append([tuple(range(len(room)))])

    # Render room and icon segmentation images.
    pol_room_seg, pol_icon_seg = utils.plotting.polygons_to_image(
        polygons, types, room_polygons, room_types, height, width )

    disp_vectors(pol_room_seg, room_classes, "rooms" )
    disp_vectors(pol_icon_seg, icon_classes, "icons" )


def save_vectors_coords( room_polygons, wall_coords,
                         out_path="room_walls.json" ):
    """Save wall and room coordinates as a JSON list of objects.

    Each entry is {"type": "wall"|"room", "coord": [[x, y], ...]}.

    room_polygons : iterable of shapely geometries; Polygon and
                    MultiPolygon exteriors are saved, GeometryCollection
                    (or any other type) is skipped — as in the original.
    wall_coords   : iterable of numpy arrays of wall coordinates.
    out_path      : output file path (default kept backward-compatible).
    """
    def _exterior_coords(polygon):
        # Integer (x, y) pairs of a shapely Polygon's outer ring.
        return [list(map(int, pt)) for pt in polygon.exterior.coords]

    out_objs = [{"type": "wall", "coord": wc.tolist()} for wc in wall_coords]

    for poly in room_polygons:
        if poly.geom_type == "Polygon":
            out_objs.append({"type": "room", "coord": _exterior_coords(poly)})
        elif poly.geom_type == "MultiPolygon":
            for sub_poly in poly.geoms:
                out_objs.append({"type": "room",
                                 "coord": _exterior_coords(sub_poly)})
        # GeometryCollection and anything else: skipped.

    with open(out_path, "w", encoding='utf-8') as f:
        # ensure_ascii=False matches the utf-8 encoding of the file;
        # output is unchanged for the purely numeric/ASCII content here.
        json.dump(out_objs, f, ensure_ascii=False)
        
        
def disp_vectors(segments, classes, cmap_name):
    """Display a segmentation label image with a labelled colorbar.

    segments  : 2-D label image to show
    classes   : label names, one per colorbar tick
    cmap_name : name of the (discrete) colormap to use
    """
    n_labels = len(classes)
    plt.figure(figsize=(12, 12))
    axis = plt.subplot(1, 1, 1)
    axis.axis('off')
    # vmax just below n_labels keeps each label inside its own color band.
    image = axis.imshow(segments, cmap=cmap_name,
                        vmin=0, vmax=n_labels - 0.1)
    # Ticks at +0.5 center each label name on its color band.
    colorbar = plt.colorbar(image,
                            ticks=np.arange(n_labels) + 0.5,
                            fraction=0.046, pad=0.01)
    colorbar.ax.set_yticklabels(classes, fontsize=20)
    plt.tight_layout()
    plt.show()

# Script entry point; expects the floor-plan image path as argv[1].
if __name__ == '__main__':
    main()



以上の内容はhttps://end0tknr.hateblo.jp/entry/20250713/1752406932より取得しました。
このページはhttp://font.textar.tv/のウェブフォントを使用してます

不具合報告/要望等はこちらへお願いします。
モバイルやる夫Viewer Ver0.14