csp_layer.py 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481
  1. # Copyright (c) OpenMMLab. All rights reserved.
  2. import torch
  3. import torch.nn as nn
  4. from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
  5. from mmengine.model import BaseModule
  6. from torch import Tensor
  7. from typing import Sequence, Union
  8. from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
  9. from .se_layer import ChannelAttention
  10. class DarknetBottleneck(BaseModule):
  11. """The basic bottleneck block used in Darknet.
  12. Each ResBlock consists of two ConvModules and the input is added to the
  13. final output. Each ConvModule is composed of Conv, BN, and LeakyReLU.
  14. The first convLayer has filter size of 1x1 and the second one has the
  15. filter size of 3x3.
  16. Args:
  17. in_channels (int): The input channels of this Module.
  18. out_channels (int): The output channels of this Module.
  19. expansion (float): The kernel size of the convolution.
  20. Defaults to 0.5.
  21. add_identity (bool): Whether to add identity to the out.
  22. Defaults to True.
  23. use_depthwise (bool): Whether to use depthwise separable convolution.
  24. Defaults to False.
  25. conv_cfg (dict): Config dict for convolution layer. Defaults to None,
  26. which means using conv2d.
  27. norm_cfg (dict): Config dict for normalization layer.
  28. Defaults to dict(type='BN').
  29. act_cfg (dict): Config dict for activation layer.
  30. Defaults to dict(type='Swish').
  31. """
  32. def __init__(self,
  33. in_channels: int,
  34. out_channels: int,
  35. expansion: float = 0.5,
  36. add_identity: bool = True,
  37. use_depthwise: bool = False,
  38. conv_cfg: OptConfigType = None,
  39. norm_cfg: ConfigType = dict(
  40. type='BN', momentum=0.03, eps=0.001),
  41. act_cfg: ConfigType = dict(type='Swish'),
  42. init_cfg: OptMultiConfig = None) -> None:
  43. super().__init__(init_cfg=init_cfg)
  44. hidden_channels = int(out_channels * expansion)
  45. conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule
  46. self.conv1 = ConvModule(
  47. in_channels,
  48. hidden_channels,
  49. 1,
  50. conv_cfg=conv_cfg,
  51. norm_cfg=norm_cfg,
  52. act_cfg=act_cfg)
  53. self.conv2 = conv(
  54. hidden_channels,
  55. out_channels,
  56. 3,
  57. stride=1,
  58. padding=1,
  59. conv_cfg=conv_cfg,
  60. norm_cfg=norm_cfg,
  61. act_cfg=act_cfg)
  62. self.add_identity = \
  63. add_identity and in_channels == out_channels
  64. def forward(self, x: Tensor) -> Tensor:
  65. """Forward function."""
  66. identity = x
  67. out = self.conv1(x)
  68. out = self.conv2(out)
  69. if self.add_identity:
  70. return out + identity
  71. else:
  72. return out
  73. class CSPNeXtBlock(BaseModule):
  74. """The basic bottleneck block used in CSPNeXt.
  75. Args:
  76. in_channels (int): The input channels of this Module.
  77. out_channels (int): The output channels of this Module.
  78. expansion (float): Expand ratio of the hidden channel. Defaults to 0.5.
  79. add_identity (bool): Whether to add identity to the out. Only works
  80. when in_channels == out_channels. Defaults to True.
  81. use_depthwise (bool): Whether to use depthwise separable convolution.
  82. Defaults to False.
  83. kernel_size (int): The kernel size of the second convolution layer.
  84. Defaults to 5.
  85. conv_cfg (dict): Config dict for convolution layer. Defaults to None,
  86. which means using conv2d.
  87. norm_cfg (dict): Config dict for normalization layer.
  88. Defaults to dict(type='BN', momentum=0.03, eps=0.001).
  89. act_cfg (dict): Config dict for activation layer.
  90. Defaults to dict(type='SiLU').
  91. init_cfg (:obj:`ConfigDict` or dict or list[dict] or
  92. list[:obj:`ConfigDict`], optional): Initialization config dict.
  93. Defaults to None.
  94. """
  95. def __init__(self,
  96. in_channels: int,
  97. out_channels: int,
  98. expansion: float = 0.5,
  99. add_identity: bool = True,
  100. use_depthwise: bool = False,
  101. kernel_size: int = 5,
  102. conv_cfg: OptConfigType = None,
  103. norm_cfg: ConfigType = dict(
  104. type='BN', momentum=0.03, eps=0.001),
  105. act_cfg: ConfigType = dict(type='SiLU'),
  106. init_cfg: OptMultiConfig = None) -> None:
  107. super().__init__(init_cfg=init_cfg)
  108. hidden_channels = int(out_channels * expansion)
  109. conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule
  110. self.conv1 = conv(
  111. in_channels,
  112. hidden_channels,
  113. 3,
  114. stride=1,
  115. padding=1,
  116. norm_cfg=norm_cfg,
  117. act_cfg=act_cfg)
  118. self.conv2 = DepthwiseSeparableConvModule(
  119. hidden_channels,
  120. out_channels,
  121. kernel_size,
  122. stride=1,
  123. padding=kernel_size // 2,
  124. conv_cfg=conv_cfg,
  125. norm_cfg=norm_cfg,
  126. act_cfg=act_cfg)
  127. self.add_identity = \
  128. add_identity and in_channels == out_channels
  129. def forward(self, x: Tensor) -> Tensor:
  130. """Forward function."""
  131. identity = x
  132. out = self.conv1(x)
  133. out = self.conv2(out)
  134. if self.add_identity:
  135. return out + identity
  136. else:
  137. return out
  138. class CSPLayer(BaseModule):
  139. """Cross Stage Partial Layer.
  140. Args:
  141. in_channels (int): The input channels of the CSP layer.
  142. out_channels (int): The output channels of the CSP layer.
  143. expand_ratio (float): Ratio to adjust the number of channels of the
  144. hidden layer. Defaults to 0.5.
  145. num_blocks (int): Number of blocks. Defaults to 1.
  146. add_identity (bool): Whether to add identity in blocks.
  147. Defaults to True.
  148. use_cspnext_block (bool): Whether to use CSPNeXt block.
  149. Defaults to False.
  150. use_depthwise (bool): Whether to use depthwise separable convolution in
  151. blocks. Defaults to False.
  152. channel_attention (bool): Whether to add channel attention in each
  153. stage. Defaults to True.
  154. conv_cfg (dict, optional): Config dict for convolution layer.
  155. Defaults to None, which means using conv2d.
  156. norm_cfg (dict): Config dict for normalization layer.
  157. Defaults to dict(type='BN')
  158. act_cfg (dict): Config dict for activation layer.
  159. Defaults to dict(type='Swish')
  160. init_cfg (:obj:`ConfigDict` or dict or list[dict] or
  161. list[:obj:`ConfigDict`], optional): Initialization config dict.
  162. Defaults to None.
  163. """
  164. def __init__(self,
  165. in_channels: int,
  166. out_channels: int,
  167. expand_ratio: float = 0.5,
  168. num_blocks: int = 1,
  169. add_identity: bool = True,
  170. use_depthwise: bool = False,
  171. use_cspnext_block: bool = False,
  172. channel_attention: bool = False,
  173. conv_cfg: OptConfigType = None,
  174. norm_cfg: ConfigType = dict(
  175. type='BN', momentum=0.03, eps=0.001),
  176. act_cfg: ConfigType = dict(type='Swish'),
  177. init_cfg: OptMultiConfig = None) -> None:
  178. super().__init__(init_cfg=init_cfg)
  179. block = CSPNeXtBlock if use_cspnext_block else DarknetBottleneck
  180. mid_channels = int(out_channels * expand_ratio)
  181. self.channel_attention = channel_attention
  182. self.main_conv = ConvModule(
  183. in_channels,
  184. mid_channels,
  185. 1,
  186. conv_cfg=conv_cfg,
  187. norm_cfg=norm_cfg,
  188. act_cfg=act_cfg)
  189. self.short_conv = ConvModule(
  190. in_channels,
  191. mid_channels,
  192. 1,
  193. conv_cfg=conv_cfg,
  194. norm_cfg=norm_cfg,
  195. act_cfg=act_cfg)
  196. self.final_conv = ConvModule(
  197. 2 * mid_channels,
  198. out_channels,
  199. 1,
  200. conv_cfg=conv_cfg,
  201. norm_cfg=norm_cfg,
  202. act_cfg=act_cfg)
  203. self.blocks = nn.Sequential(*[
  204. block(
  205. mid_channels,
  206. mid_channels,
  207. 1.0,
  208. add_identity,
  209. use_depthwise,
  210. conv_cfg=conv_cfg,
  211. norm_cfg=norm_cfg,
  212. act_cfg=act_cfg) for _ in range(num_blocks)
  213. ])
  214. if channel_attention:
  215. self.attention = ChannelAttention(2 * mid_channels)
  216. def forward(self, x: Tensor) -> Tensor:
  217. """Forward function."""
  218. x_short = self.short_conv(x)
  219. x_main = self.main_conv(x)
  220. x_main = self.blocks(x_main)
  221. x_final = torch.cat((x_main, x_short), dim=1)
  222. if self.channel_attention:
  223. x_final = self.attention(x_final)
  224. return self.final_conv(x_final)
  225. class YoloV8Bottleneck(DarknetBottleneck):
  226. """The basic bottleneck block used in Darknet.
  227. Each ResBlock consists of two ConvModules and the input is added to the
  228. final output. Each ConvModule is composed of Conv, BN, and LeakyReLU.
  229. The first convLayer has filter size of k1Xk1 and the second one has the
  230. filter size of k2Xk2.
  231. Note:
  232. This DarknetBottleneck is little different from MMDet's, we can
  233. change the kernel size and padding for each conv.
  234. Args:
  235. in_channels (int): The input channels of this Module.
  236. out_channels (int): The output channels of this Module.
  237. expansion (float): The kernel size for hidden channel.
  238. Defaults to 0.5.
  239. kernel_size (Sequence[int]): The kernel size of the convolution.
  240. Defaults to (1, 3).
  241. padding (Sequence[int]): The padding size of the convolution.
  242. Defaults to (0, 1).
  243. add_identity (bool): Whether to add identity to the out.
  244. Defaults to True
  245. use_depthwise (bool): Whether to use depthwise separable convolution.
  246. Defaults to False
  247. conv_cfg (dict): Config dict for convolution layer. Default: None,
  248. which means using conv2d.
  249. norm_cfg (dict): Config dict for normalization layer.
  250. Defaults to dict(type='BN').
  251. act_cfg (dict): Config dict for activation layer.
  252. Defaults to dict(type='Swish').
  253. """
  254. def __init__(self,
  255. in_channels: int,
  256. out_channels: int,
  257. expansion: float = 0.5,
  258. kernel_size: Sequence[int] = (1, 3),
  259. padding: Sequence[int] = (0, 1),
  260. add_identity: bool = True,
  261. use_depthwise: bool = False,
  262. conv_cfg: OptConfigType = None,
  263. norm_cfg: ConfigType = dict(
  264. type='BN', momentum=0.03, eps=0.001),
  265. act_cfg: ConfigType = dict(type='SiLU', inplace=True),
  266. init_cfg: OptMultiConfig = None) -> None:
  267. super().__init__(in_channels, out_channels, init_cfg=init_cfg)
  268. hidden_channels = int(out_channels * expansion)
  269. conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule
  270. assert isinstance(kernel_size, Sequence) and len(kernel_size) == 2
  271. self.conv1 = ConvModule(
  272. in_channels,
  273. hidden_channels,
  274. kernel_size[0],
  275. padding=padding[0],
  276. conv_cfg=conv_cfg,
  277. norm_cfg=norm_cfg,
  278. act_cfg=act_cfg)
  279. self.conv2 = conv(
  280. hidden_channels,
  281. out_channels,
  282. kernel_size[1],
  283. stride=1,
  284. padding=padding[1],
  285. conv_cfg=conv_cfg,
  286. norm_cfg=norm_cfg,
  287. act_cfg=act_cfg)
  288. self.add_identity = \
  289. add_identity and in_channels == out_channels
  290. class CSPLayerWithTwoConv(BaseModule):
  291. """Cross Stage Partial Layer with 2 convolutions.
  292. Args:
  293. in_channels (int): The input channels of the CSP layer.
  294. out_channels (int): The output channels of the CSP layer.
  295. expand_ratio (float): Ratio to adjust the number of channels of the
  296. hidden layer. Defaults to 0.5.
  297. num_blocks (int): Number of blocks. Defaults to 1
  298. add_identity (bool): Whether to add identity in blocks.
  299. Defaults to True.
  300. conv_cfg (dict, optional): Config dict for convolution layer.
  301. Defaults to None, which means using conv2d.
  302. norm_cfg (dict): Config dict for normalization layer.
  303. Defaults to dict(type='BN').
  304. act_cfg (dict): Config dict for activation layer.
  305. Defaults to dict(type='SiLU', inplace=True).
  306. init_cfg (:obj:`ConfigDict` or dict or list[dict] or
  307. list[:obj:`ConfigDict`], optional): Initialization config dict.
  308. Defaults to None.
  309. """
  310. def __init__(
  311. self,
  312. in_channels: int,
  313. out_channels: int,
  314. expand_ratio: float = 0.5,
  315. num_blocks: int = 1,
  316. add_identity: bool = True, # shortcut
  317. conv_cfg: OptConfigType = None,
  318. norm_cfg: ConfigType = dict(type='BN', momentum=0.03, eps=0.001),
  319. act_cfg: ConfigType = dict(type='SiLU', inplace=True),
  320. init_cfg: OptMultiConfig = None) -> None:
  321. super().__init__(init_cfg=init_cfg)
  322. self.mid_channels = int(out_channels * expand_ratio)
  323. self.main_conv = ConvModule(
  324. in_channels,
  325. 2 * self.mid_channels,
  326. 1,
  327. conv_cfg=conv_cfg,
  328. norm_cfg=norm_cfg,
  329. act_cfg=act_cfg)
  330. self.final_conv = ConvModule(
  331. (2 + num_blocks) * self.mid_channels,
  332. out_channels,
  333. 1,
  334. conv_cfg=conv_cfg,
  335. norm_cfg=norm_cfg,
  336. act_cfg=act_cfg)
  337. self.blocks = nn.ModuleList(
  338. YoloV8Bottleneck(
  339. self.mid_channels,
  340. self.mid_channels,
  341. expansion=1,
  342. kernel_size=(3, 3),
  343. padding=(1, 1),
  344. add_identity=add_identity,
  345. use_depthwise=False,
  346. conv_cfg=conv_cfg,
  347. norm_cfg=norm_cfg,
  348. act_cfg=act_cfg) for _ in range(num_blocks))
  349. def forward(self, x: Tensor) -> Tensor:
  350. """Forward process."""
  351. x_main = self.main_conv(x)
  352. x_main = list(x_main.split((self.mid_channels, self.mid_channels), 1))
  353. x_main.extend(blocks(x_main[-1]) for blocks in self.blocks)
  354. return self.final_conv(torch.cat(x_main, 1))
# TODO: try a deploy-time reparameterization that uses only 3x3 convs.
  356. class SPPFBottleneck(BaseModule):
  357. """Spatial pyramid pooling - Fast (SPPF) layer for
  358. YOLOv5, YOLOX and PPYOLOE by Glenn Jocher
  359. Args:
  360. in_channels (int): The input channels of this Module.
  361. out_channels (int): The output channels of this Module.
  362. kernel_sizes (int, tuple[int]): Sequential or number of kernel
  363. sizes of pooling layers. Defaults to 5.
  364. use_conv_first (bool): Whether to use conv before pooling layer.
  365. In YOLOv5 and YOLOX, the para set to True.
  366. In PPYOLOE, the para set to False.
  367. Defaults to True.
  368. mid_channels_scale (float): Channel multiplier, multiply in_channels
  369. by this amount to get mid_channels. This parameter is valid only
  370. when use_conv_fist=True.Defaults to 0.5.
  371. conv_cfg (dict): Config dict for convolution layer. Defaults to None.
  372. which means using conv2d. Defaults to None.
  373. norm_cfg (dict): Config dict for normalization layer.
  374. Defaults to dict(type='BN', momentum=0.03, eps=0.001).
  375. act_cfg (dict): Config dict for activation layer.
  376. Defaults to dict(type='SiLU', inplace=True).
  377. init_cfg (dict or list[dict], optional): Initialization config dict.
  378. Defaults to None.
  379. """
  380. def __init__(self,
  381. in_channels: int,
  382. out_channels: int,
  383. kernel_sizes: Union[int, Sequence[int]] = 5,
  384. use_conv_first: bool = True,
  385. mid_channels_scale: float = 0.5,
  386. conv_cfg: ConfigType = None,
  387. norm_cfg: ConfigType = dict(
  388. type='BN', momentum=0.03, eps=0.001),
  389. act_cfg: ConfigType = dict(type='SiLU', inplace=True),
  390. init_cfg: OptMultiConfig = None):
  391. super().__init__(init_cfg)
  392. if use_conv_first:
  393. mid_channels = int(in_channels * mid_channels_scale)
  394. self.conv1 = ConvModule(
  395. in_channels,
  396. mid_channels,
  397. 1,
  398. stride=1,
  399. conv_cfg=conv_cfg,
  400. norm_cfg=norm_cfg,
  401. act_cfg=act_cfg)
  402. else:
  403. mid_channels = in_channels
  404. self.conv1 = None
  405. self.kernel_sizes = kernel_sizes
  406. if isinstance(kernel_sizes, int):
  407. self.poolings = nn.MaxPool2d(
  408. kernel_size=kernel_sizes, stride=1, padding=kernel_sizes // 2)
  409. conv2_in_channels = mid_channels * 4
  410. else:
  411. self.poolings = nn.ModuleList([
  412. nn.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2)
  413. for ks in kernel_sizes
  414. ])
  415. conv2_in_channels = mid_channels * (len(kernel_sizes) + 1)
  416. self.conv2 = ConvModule(
  417. conv2_in_channels,
  418. out_channels,
  419. 1,
  420. conv_cfg=conv_cfg,
  421. norm_cfg=norm_cfg,
  422. act_cfg=act_cfg)
  423. def forward(self, x: Tensor) -> Tensor:
  424. """Forward process
  425. Args:
  426. x (Tensor): The input tensor.
  427. """
  428. if self.conv1:
  429. x = self.conv1(x)
  430. if isinstance(self.kernel_sizes, int):
  431. y1 = self.poolings(x)
  432. y2 = self.poolings(y1)
  433. x = torch.cat([x, y1, y2, self.poolings(y2)], dim=1)
  434. else:
  435. x = torch.cat(
  436. [x] + [pooling(x) for pooling in self.poolings], dim=1)
  437. x = self.conv2(x)
  438. return x