- Backend: adjust the product project so that API docs are not enabled during local development

- Backend: introduce Seata distributed transactions into the product project
YunaiV 2019-05-13 23:38:25 +08:00
parent a447d97789
commit 8f59051a62
8 changed files with 141 additions and 8 deletions


@@ -80,8 +80,8 @@ TODO: a fancy architecture diagram (JPG) should go here.
| `admin-web` | [Frontend] Admin console | HTTP 8080 | |
| `mobile-web` | [Frontend] Mall H5 | HTTP 8000 | |
| `admin-application` | Admin HTTP service | HTTP 18083 | [API docs](http://api.shop.iocoder.cn:18099/admin-api/doc.html) |
| `user-application` | User HTTP service | HTTP 18082 | [API docs](http://api.shop.iocoder.cn:18099/user-api/doc.html) |
| `product-application` | Product HTTP service | HTTP 18081 | [API docs](http://api.shop.iocoder.cn:18099/product-api/doc.html) |
| `pay-application` | Payment HTTP service | HTTP 18084 | |
| `promotion-application` | Promotion HTTP service | HTTP 18085 | |
| `search-application` | Search HTTP service | HTTP 18086 | |


@@ -0,0 +1,6 @@
swagger:
  enable: true
  title: 商品子系统
  description: 商品子系统
  version: 1.0.0
  base-package: cn.iocoder.mall.product.application.controller


@@ -9,7 +9,4 @@ server:
  context-path: /product-api/
swagger:
  enable: false
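Taken together, the new profile-specific file above (swagger.enable: true plus the full metadata) and this default application.yml (swagger.enable: false) turn the API documentation into a per-profile switch, so local development no longer starts the docs. The shared Swagger auto-configuration that consumes these properties is not part of this diff; the following is only a minimal sketch of how such a toggle is typically wired with Springfox 2.x, and the class and property names (SwaggerConfiguration, SwaggerProperties) are hypothetical, not the project's actual code.

// Hypothetical sketch: create the Springfox Docket only when swagger.enable=true,
// binding the remaining swagger.* keys to a properties object.
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import springfox.documentation.builders.ApiInfoBuilder;
import springfox.documentation.builders.PathSelectors;
import springfox.documentation.builders.RequestHandlerSelectors;
import springfox.documentation.spi.DocumentationType;
import springfox.documentation.spring.web.plugins.Docket;
import springfox.documentation.swagger2.annotations.EnableSwagger2;

@Configuration
@EnableSwagger2
@ConditionalOnProperty(prefix = "swagger", name = "enable", havingValue = "true")
@EnableConfigurationProperties(SwaggerConfiguration.SwaggerProperties.class)
public class SwaggerConfiguration {

    @Bean
    public Docket docket(SwaggerProperties properties) {
        // Expose only the controllers under the configured base package.
        return new Docket(DocumentationType.SWAGGER_2)
                .apiInfo(new ApiInfoBuilder()
                        .title(properties.getTitle())
                        .description(properties.getDescription())
                        .version(properties.getVersion())
                        .build())
                .select()
                .apis(RequestHandlerSelectors.basePackage(properties.getBasePackage()))
                .paths(PathSelectors.any())
                .build();
    }

    @ConfigurationProperties("swagger")
    public static class SwaggerProperties {
        private String title;
        private String description;
        private String version;
        private String basePackage; // bound from swagger.base-package via relaxed binding

        public String getTitle() { return title; }
        public void setTitle(String title) { this.title = title; }
        public String getDescription() { return description; }
        public void setDescription(String description) { this.description = description; }
        public String getVersion() { return version; }
        public void setVersion(String version) { this.version = version; }
        public String getBasePackage() { return basePackage; }
        public void setBasePackage(String basePackage) { this.basePackage = basePackage; }
    }
}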


@@ -44,6 +44,15 @@
            <artifactId>mybatis-plus-boot-starter</artifactId>
        </dependency>
        <dependency>
            <groupId>io.seata</groupId>
            <artifactId>seata-spring</artifactId>
        </dependency>
        <dependency>
            <groupId>io.seata</groupId>
            <artifactId>seata-dubbo</artifactId>
        </dependency>
        <!-- RPC related -->
        <dependency>
            <groupId>org.apache.dubbo</groupId>

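seata-spring contributes the GlobalTransactionScanner and @GlobalTransactional support that is wired up in DatabaseConfiguration below, while seata-dubbo ships a Dubbo filter that carries the global transaction id (XID) across RPC calls so provider-side work joins the caller's global transaction. Roughly, that filter does the equivalent of the following simplified sketch; the class name is hypothetical and this is not code from the commit, only an illustration of the mechanism (RootContext and RpcContext are the real Seata/Dubbo APIs).

// Simplified illustration of XID propagation over Dubbo, which the seata-dubbo
// filter performs automatically on both sides of a call.
import io.seata.core.context.RootContext;
import org.apache.dubbo.rpc.RpcContext;

public class XidPropagationSketch {

    // Consumer side: attach the current global transaction id to the outgoing call.
    public static void beforeInvoke() {
        String xid = RootContext.getXID();
        if (xid != null) {
            RpcContext.getContext().setAttachment(RootContext.KEY_XID, xid);
        }
    }

    // Provider side: bind the received id so local branches join the same global transaction.
    public static void onReceive() {
        String xid = RpcContext.getContext().getAttachment(RootContext.KEY_XID);
        if (xid != null) {
            RootContext.bind(xid);
        }
    }
}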

@@ -1,14 +1,48 @@
package cn.iocoder.mall.product.config;

import com.alibaba.druid.pool.DruidDataSource;
import com.alibaba.druid.spring.boot.autoconfigure.DruidDataSourceBuilder;
import io.seata.rm.datasource.DataSourceProxy;
import io.seata.spring.annotation.GlobalTransactionScanner;
import org.mybatis.spring.annotation.MapperScan;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.DependsOn;
import org.springframework.context.annotation.Primary;
import org.springframework.transaction.annotation.EnableTransactionManagement;

import javax.sql.DataSource;

@Configuration
@MapperScan("cn.iocoder.mall.product.dao") // scan the corresponding Mapper interfaces
@EnableTransactionManagement(proxyTargetClass = true) // enable transaction management; for why proxyTargetClass is used, see https://blog.csdn.net/huang_550/article/details/76492600
public class DatabaseConfiguration {

    // Seata settings: the application id and transaction service group used to register with the transaction coordinator
    @Value("${spring.application.name}")
    private String applicationId;
    @Value("${seata.tx-service-group}")
    private String txServiceGroup;

    @Bean("druidDataSource")
    @ConfigurationProperties("spring.datasource.druid")
    public DruidDataSource druidDataSource() {
        return DruidDataSourceBuilder.create().build();
    }

    @ConfigurationProperties(prefix = "spring.datasource")
    @Primary
    @Bean("dataSource")
    @DependsOn("druidDataSource") // work around the multi-data-source circular dependency, which mainly occurs in DataSourceInitializerInvoker
    public DataSource dataSource() {
        // wrap the Druid data source with Seata's DataSourceProxy so SQL runs as branch transactions
        DruidDataSource druidDataSource = druidDataSource();
        return new DataSourceProxy(druidDataSource);
    }

    @Bean
    public GlobalTransactionScanner globalTransactionScanner() {
        // register Seata's scanner, which enables @GlobalTransactional processing
        return new GlobalTransactionScanner(applicationId, txServiceGroup);
    }
}
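With the DataSourceProxy and the GlobalTransactionScanner in place, a service method can open a distributed transaction by annotating it with @GlobalTransactional: its local SQL and any downstream Dubbo calls are then committed or rolled back together. Below is a minimal usage sketch; the service, method, and parameter names are placeholders, not classes from this commit.

// Hypothetical usage sketch of @GlobalTransactional.
import io.seata.spring.annotation.GlobalTransactional;
import org.springframework.stereotype.Service;

@Service
public class ProductStockServiceSketch {

    // Opens a Seata global transaction: the local update through the proxied Druid
    // data source and any Dubbo calls made inside are registered as branches and
    // committed or rolled back together by the transaction coordinator.
    @GlobalTransactional(timeoutMills = 300000, name = "product-reduce-stock")
    public void reduceStock(Integer productId, Integer quantity) {
        // 1. local branch: update the product stock via MyBatis (omitted)
        // 2. remote branch: e.g. call the order or pay service over Dubbo (omitted)
        // throwing a RuntimeException here rolls back every branch
    }
}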


@@ -39,3 +39,7 @@ rocketmq:
  name-server: 127.0.0.1:9876
  producer:
    group: product-producer-group
# seata
seata:
  tx-service-group: my_test_tx_group


@@ -0,0 +1,69 @@
transport {
  # tcp udt unix-domain-socket
  type = "TCP"
  #NIO NATIVE
  server = "NIO"
  #enable heartbeat
  heartbeat = true
  #thread factory for netty
  thread-factory {
    boss-thread-prefix = "NettyBoss"
    worker-thread-prefix = "NettyServerNIOWorker"
    server-executor-thread-prefix = "NettyServerBizHandler"
    share-boss-worker = false
    client-selector-thread-prefix = "NettyClientSelector"
    client-selector-thread-size = 1
    client-worker-thread-prefix = "NettyClientWorkerThread"
    # netty boss thread size,will not be used for UDT
    boss-thread-size = 1
    #auto default pin or 8
    worker-thread-size = 8
  }
}

service {
  #vgroup->rgroup
  vgroup_mapping.my_test_tx_group = "default"
  #only support single node
  default.grouplist = "180.167.213.26:8091"
  #degrade current not support
  enableDegrade = false
  #disable
  disable = false
}

client {
  async.commit.buffer.limit = 10000
  lock {
    retry.internal = 10
    retry.times = 30
  }
}

## transaction log store
store {
  ## store mode: file、db
  mode = "file"

  ## file store
  file {
    dir = "file_store/data"
    # branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
    max-branch-session-size = 16384
    # globe session size , if exceeded throws exceptions
    max-global-session-size = 512
    # file buffer size , if exceeded allocate new buffer
    file-write-buffer-cache-size = 16384
    # when recover batch read size
    session.reload.read_size = 100
  }

  ## database store
  db {
    driver_class = ""
    url = ""
    user = ""
    password = ""
  }
}
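Note how the pieces line up: seata.tx-service-group in application.yml is my_test_tx_group, the vgroup_mapping.my_test_tx_group entry above maps that group to the cluster name "default", and default.grouplist supplies the address of the Seata transaction coordinator (here 180.167.213.26:8091) that the client connects to when the registry type is "file" (see registry.conf below).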


@@ -0,0 +1,14 @@
registry {
  type = "file"

  file {
    name = "file.conf"
  }
  zk {
    cluster = "default"
    serverAddr = "192.168.88.10:2181"
    session.timeout = 6000
    connect.timeout = 2000
  }
}
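With type = "file", the client resolves the transaction coordinator through the file.conf shown above (the file.name entry points to it); the zk block is only a ready-made alternative and would take effect if type were switched to "zk".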