监控Source端Pg对Flink CDC的影响

1.pom

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.sunwoda</groupId>
    <artifactId>pg-test</artifactId>
    <version>2.0-SNAPSHOT</version>

    <properties>
        <java.version>1.8</java.version>
        <maven.compiler.source>${java.version}</maven.compiler.source>
        <maven.compiler.target>${java.version}</maven.compiler.target>
        <!-- fixed typo: property was misspelled "fastjson.vsersion" -->
        <fastjson.version>2.0.52</fastjson.version>
        <postgresql.version>42.2.12</postgresql.version>
        <mysql.version>8.0.33</mysql.version>
        <logback.version>1.2.11</logback.version>
        <lombok.version>1.18.20</lombok.version>
    </properties>

    <dependencies>
        <!-- PostgreSQL JDBC driver: the metric source -->
        <dependency>
            <groupId>org.postgresql</groupId>
            <artifactId>postgresql</artifactId>
            <version>${postgresql.version}</version>
        </dependency>
        <!-- MySQL driver: Doris is accessed over the MySQL protocol -->
        <dependency>
            <groupId>com.mysql</groupId>
            <artifactId>mysql-connector-j</artifactId>
            <version>${mysql.version}</version>
        </dependency>
        <!-- Logging backend (brings slf4j-api transitively for @Slf4j) -->
        <dependency>
            <groupId>ch.qos.logback</groupId>
            <artifactId>logback-classic</artifactId>
            <version>${logback.version}</version>
        </dependency>
        <dependency>
            <groupId>ch.qos.logback</groupId>
            <artifactId>logback-core</artifactId>
            <version>${logback.version}</version>
        </dependency>
        <!-- JSON serialization of long-transaction snapshots -->
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
            <version>${fastjson.version}</version>
        </dependency>
        <!-- Compile-time only (@Slf4j, @Data) -->
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <version>${lombok.version}</version>
            <scope>provided</scope>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <!-- Fat jar with Main-Class so the monitor runs standalone -->
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-assembly-plugin</artifactId>
                <version>3.0.0</version>
                <configuration>
                    <descriptorRefs>
                        <descriptorRef>jar-with-dependencies</descriptorRef>
                    </descriptorRefs>
                    <archive>
                        <manifest>
                            <mainClass>com.test.cdc.PgCdcUltimateMonitor</mainClass>
                        </manifest>
                    </archive>
                </configuration>
                <executions>
                    <execution>
                        <id>make-assembly</id>
                        <phase>package</phase>
                        <goals>
                            <goal>single</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>

2.java代码主类

package com.test.cdc;

import com.alibaba.fastjson.JSON;
import lombok.extern.slf4j.Slf4j;

import java.sql.*;
import java.util.*;
import java.util.Date;

/**
 * Monitors the impact of source-side PostgreSQL on Flink CDC.
 *
 * <p>Every {@link #INTERVAL} ms the task connects to each database in
 * {@link #PG_DATABASES}, samples WAL/checkpoint counters, connection and lock
 * statistics, transaction throughput, cache hit ratio and long-running
 * transactions, lets {@link CdcHealthResult#analyze} grade CDC health, and
 * appends one row per database to the Doris table {@code t_pg_log}.
 */
@Slf4j
public class PgCdcUltimateMonitor {

    // PostgreSQL connection settings (one host, several databases)
    private static final String PG_HOST = "";
    private static final int PG_PORT = 5432;
    private static final String PG_USER = "";
    private static final String PG_PASSWORD = "";
    private static final List<String> PG_DATABASES = Arrays.asList(
            "dbName1",
            "dbName2",
            "dbName3"
    );

    // Doris sink settings (Doris is reached over the MySQL protocol)
    private static final String DORIS_URL = "jdbc:mysql://ip:9030/ods";
    private static final String DORIS_USER = "";
    private static final String DORIS_PASSWORD = "";

    // Sampling interval: 5 minutes
    private static final long INTERVAL = 5 * 60 * 1000L;

    // A transaction open longer than this many seconds counts as "long"
    private static final long LONG_TX_THRESHOLD_SECONDS = 180L;

    static {
        try {
            // Fail fast at startup if the Doris/MySQL driver jar is missing.
            Class.forName("com.mysql.cj.jdbc.Driver");
        } catch (ClassNotFoundException e) {
            log.error("找不到 MySQL 驱动:{}", e.getMessage());
        }
    }

    public static void main(String[] args) {
        new PgCdcUltimateMonitor().start();
    }

    /** Schedules the monitoring task at a fixed rate, first run immediately. */
    public void start() {
        log.info("===== PostgreSQL CDC 终极监控程序启动 =====");
        new Timer().scheduleAtFixedRate(new MonitorTask(), 0, INTERVAL);
    }

    static class MonitorTask extends TimerTask {
        @Override
        public void run() {
            // One failed database must not abort the rest of the cycle.
            for (String dbName : PG_DATABASES) {
                String pgUrl = String.format("jdbc:postgresql://%s:%d/%s", PG_HOST, PG_PORT, dbName);
                try (Connection pgConn = DriverManager.getConnection(pgUrl, PG_USER, PG_PASSWORD)) {
                    log.info("[{}] 开始采集数据库: {}", new Date(), dbName);

                    // --------------------------
                    // 1. Sample PostgreSQL metrics
                    // --------------------------
                    long checkpointsTimed = 0;
                    long checkpointsReq = 0;
                    long walBuffersFull = 0;
                    double cpuUsage = 0.0;
                    double memoryUsage = 0.0;
                    int activeConn = 0;
                    int idleConn = 0;
                    int lockedConn = 0;
                    long xactCommit = 0;
                    long xactRollback = 0;
                    double cacheHitRatio = 0.0;
                    String longTransactionJson = "";

                    // WAL & checkpoint counters (cumulative since stats reset)
                    try (PreparedStatement ps = pgConn.prepareStatement(
                            "SELECT checkpoints_timed, checkpoints_req FROM pg_stat_bgwriter");
                         ResultSet rs = ps.executeQuery()) {
                        if (rs.next()) {
                            checkpointsTimed = rs.getLong("checkpoints_timed");
                            checkpointsReq = rs.getLong("checkpoints_req");
                        }
                    }

                    // Times WAL was flushed because wal_buffers filled up
                    // (pg_stat_wal requires PostgreSQL 14+)
                    try (PreparedStatement ps = pgConn.prepareStatement(
                            "SELECT wal_buffers_full FROM pg_stat_wal");
                         ResultSet rs = ps.executeQuery()) {
                        if (rs.next()) {
                            walBuffersFull = rs.getLong("wal_buffers_full");
                        }
                    }

                    // "CPU" here is really the share of active backends and
                    // "memory" the shared_buffers setting — proxies, not OS
                    // metrics. NOTE(review): pg_settings.setting for
                    // shared_buffers is in 8kB pages, so the /1024^3 scaling
                    // is not literal GB — confirm the intended unit.
                    try (PreparedStatement ps = pgConn.prepareStatement(
                            "SELECT " +
                                    "ROUND( " +
                                    " (COUNT(*) FILTER (WHERE state = 'active'))::NUMERIC / COUNT(*) * 100, " +
                                    " 2 " +
                                    ") AS cpu_usage, " +
                                    "ROUND( " +
                                    " (SELECT setting::NUMERIC FROM pg_settings WHERE name = 'shared_buffers') / 1024 / 1024 / 1024, " +
                                    " 2 " +
                                    ") AS memory_usage " +
                                    "FROM pg_stat_activity " +
                                    "LIMIT 1");
                         ResultSet rs = ps.executeQuery()) {
                        if (rs.next()) {
                            cpuUsage = rs.getDouble("cpu_usage");
                            memoryUsage = rs.getDouble("memory_usage");
                        }
                    }

                    // Connection breakdown. wait_event_type values are
                    // case-sensitive: 'Lock' (the previous 'LOCK' never
                    // matched, so locked connections always read 0).
                    try (PreparedStatement ps = pgConn.prepareStatement(
                            "SELECT " +
                                    "COUNT(*) FILTER (WHERE state = 'active') AS active, " +
                                    "COUNT(*) FILTER (WHERE state IN ('idle','idle in transaction')) AS idle, " +
                                    "COUNT(*) FILTER (WHERE wait_event_type = 'Lock') AS locked " +
                                    "FROM pg_stat_activity");
                         ResultSet rs = ps.executeQuery()) {
                        if (rs.next()) {
                            activeConn = rs.getInt("active");
                            idleConn = rs.getInt("idle");
                            lockedConn = rs.getInt("locked");
                        }
                    }

                    // Transaction throughput & buffer-cache hit ratio.
                    // NULLIF guards against division by zero right after a
                    // stats reset (the ratio then reads as 0.0).
                    try (PreparedStatement ps = pgConn.prepareStatement(
                            "SELECT " +
                                    "xact_commit, " +
                                    "xact_rollback, " +
                                    "(blks_hit::FLOAT/NULLIF(blks_hit + blks_read, 0))*100 AS cache_hit_ratio " +
                                    "FROM pg_stat_database WHERE datname = current_database()");
                         ResultSet rs = ps.executeQuery()) {
                        if (rs.next()) {
                            xactCommit = rs.getLong("xact_commit");
                            xactRollback = rs.getLong("xact_rollback");
                            cacheHitRatio = rs.getDouble("cache_hit_ratio");
                        }
                    }

                    // Transactions open longer than the threshold — large
                    // transactions delay decoding and hence CDC latency.
                    List<LongTransaction> longTransactions = new ArrayList<>();
                    try (PreparedStatement pstmt = pgConn.prepareStatement(
                            "SELECT " +
                                    "pid AS transaction_id, " +
                                    "EXTRACT(EPOCH FROM (NOW() - xact_start)) AS duration_seconds, " +
                                    "query AS sql, " +
                                    "state " +
                                    "FROM pg_stat_activity " +
                                    "WHERE " +
                                    "xact_start IS NOT NULL " +           // an open transaction exists
                                    "AND EXTRACT(EPOCH FROM (NOW() - xact_start)) > ? " + // above threshold
                                    "AND state <> 'idle'")) {
                        pstmt.setLong(1, LONG_TX_THRESHOLD_SECONDS);
                        try (ResultSet rs = pstmt.executeQuery()) {
                            while (rs.next()) {
                                LongTransaction transaction = new LongTransaction();
                                transaction.setTransactionId(rs.getString("transaction_id"));
                                transaction.setDurationSeconds(rs.getLong("duration_seconds"));
                                transaction.setSql(rs.getString("sql"));
                                transaction.setState(rs.getString("state"));
                                longTransactions.add(transaction);
                            }
                        }
                    }
                    // Serialize once after the result set is drained (the old
                    // code re-serialized the whole list on every row). Uses
                    // fastjson, which is the pom-declared JSON dependency —
                    // druid's JSONUtils was never on the classpath.
                    if (!longTransactions.isEmpty()) {
                        longTransactionJson = JSON.toJSONString(longTransactions);
                    }

                    // --------------------------
                    // 2. Grade CDC health
                    // --------------------------
                    CdcHealthResult health = CdcHealthResult.analyze(
                            dbName,
                            walBuffersFull,
                            checkpointsReq,
                            lockedConn,
                            cpuUsage,
                            cacheHitRatio,
                            activeConn
                    );

                    // --------------------------
                    // 3. Persist to Doris
                    // --------------------------
                    try (Connection dorisConn = DriverManager.getConnection(DORIS_URL, DORIS_USER, DORIS_PASSWORD)) {
                        String sql = "INSERT INTO t_pg_log (" +
                                "id, " +
                                "database_name, " +
                                "checkpoints_timed, checkpoints_req, wal_buffers_full, " +
                                "cpu_usage, memory_usage, active_connections, idle_connections, locked_connections, " +
                                "xact_commit, xact_rollback, cache_hit_ratio, " +
                                "cdc_health_level, cdc_health_reason, cdc_health_suggestion, longTransactionJson, create_time" +
                                ") VALUES (?, ?, ?,?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
                        try (PreparedStatement pstmt = dorisConn.prepareStatement(sql)) {
                            pstmt.setString(1, UUID.randomUUID().toString().replace("-", ""));
                            pstmt.setString(2, dbName);
                            pstmt.setLong(3, checkpointsTimed);
                            pstmt.setLong(4, checkpointsReq);
                            pstmt.setLong(5, walBuffersFull);
                            pstmt.setDouble(6, cpuUsage);
                            pstmt.setDouble(7, memoryUsage);
                            pstmt.setInt(8, activeConn);
                            pstmt.setInt(9, idleConn);
                            pstmt.setInt(10, lockedConn);
                            pstmt.setLong(11, xactCommit);
                            pstmt.setLong(12, xactRollback);
                            pstmt.setDouble(13, cacheHitRatio);
                            pstmt.setString(14, health.level);
                            pstmt.setString(15, health.reason);
                            pstmt.setString(16, health.suggestion);
                            pstmt.setString(17, longTransactionJson);
                            pstmt.setTimestamp(18, new Timestamp(System.currentTimeMillis()));
                            pstmt.executeUpdate();
                            log.info("[{}] 数据库 {} 写入 Doris 成功:CDC 健康度={}", new Date(), dbName, health.level);
                        }
                    }
                } catch (Exception e) {
                    // Catch broadly: an uncaught RuntimeException would kill
                    // the Timer thread and silently stop all sampling. Pass
                    // the exception itself so the stack trace is logged.
                    log.error("[{}] 数据库 {} 采集失败: {}", new Date(), dbName, e.getMessage(), e);
                }
            }
        }
    }
}

3.输出优化建议

package com.test.cdc; import org.apache.commons.lang3.StringUtils; public class CdcHealthResult { public String level; public String reason; public String suggestion; public CdcHealthResult(String level, String reason, String suggestion) { this.level = level; this.reason = reason; this.suggestion = suggestion; } public static CdcHealthResult analyze( String dbName, long walBuffersFull, long checkpointsReq, int lockedConn, double cpuUsage, double cacheHitRatio, int activeConn ) { // 计算周期增量(调用MetricCache) long walBuffersFullIncrement = MetricCache.getInstance() .getIncrement(dbName, "wal_buffers_full", walBuffersFull); // ALERT if (walBuffersFullIncrement > 100) { return new CdcHealthResult( "ALERT", "WAL 缓冲区写满,CDC 会被阻塞,延迟必然上升", "增大 wal_buffers、提升磁盘 IO、减少大事务" ); } if (lockedConn > 5) { return new CdcHealthResult( "ALERT", "锁等待过多(" + lockedConn + "),事务提交变慢,CDC 延迟会上升", "检查慢 SQL、长事务、锁竞争" ); } long checkpointsReqIncrement = MetricCache.getInstance() .getIncrement(dbName, "checkpoints_req", checkpointsReq); // WARNING if (checkpointsReqIncrement > 10 || cpuUsage > 80) { return new CdcHealthResult( "WARNING", "checkpoint 频繁或 CPU 过高,可能影响 CDC 性能", "增大 shared_buffers、优化 SQL、降低写入压力" ); } if (cacheHitRatio < 95) { return new CdcHealthResult( "WARNING", "缓存命中率低(" + String.format("%.2f", cacheHitRatio) + "%),磁盘 IO 升高", "增大 shared_buffers、优化索引" ); } if (activeConn > 200) { return new CdcHealthResult( "WARNING", "活跃连接过多(" + activeConn + "),PostgreSQL 压力增大", "优化连接池、减少长连接" ); } // HEALTHY return new CdcHealthResult( "HEALTHY", "PostgreSQL 状态良好,CDC 运行稳定", "继续保持当前配置" ); } }

4.对部分指标进行增量计算

package com.test.cdc;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/**
 * Singleton cache of the previous sample of each cumulative metric, used to
 * turn ever-growing PostgreSQL counters (e.g. wal_buffers_full,
 * checkpoints_req) into per-interval deltas.
 */
public class MetricCache {

    private static final MetricCache INSTANCE = new MetricCache();

    // Previous samples: database name -> (metric name -> last sampled value).
    // Both levels are concurrent maps; the old inner HashMap was not safe to
    // share even though the outer map was.
    private final Map<String, Map<String, Long>> historyMetrics = new ConcurrentHashMap<>();

    private MetricCache() {
    }

    public static MetricCache getInstance() {
        return INSTANCE;
    }

    /**
     * Records {@code currentValue} and returns the delta against the previous
     * sample of the same (database, metric) pair.
     *
     * @param dbName       database the sample belongs to
     * @param metricName   metric identifier
     * @param currentValue current cumulative counter value
     * @return the increment since the previous call, or {@code -1} on the very
     *         first sample (no baseline yet; all alert thresholds are positive,
     *         so -1 never triggers them — the old comment claiming "0 on first
     *         sample" was wrong)
     */
    public long getIncrement(String dbName, String metricName, long currentValue) {
        Map<String, Long> dbMetrics =
                historyMetrics.computeIfAbsent(dbName, k -> new ConcurrentHashMap<>());
        // put() returns the previous value in one step, replacing the old
        // containsKey/getOrDefault/put sequence.
        Long previous = dbMetrics.put(metricName, currentValue);
        return previous == null ? -1 : currentValue - previous;
    }
}

5.创建事务实体类,用于记录监控到的耗时较长的SQL

package com.test.cdc;

import lombok.Data;

/**
 * Snapshot of one long-running PostgreSQL transaction (open beyond the
 * monitor's threshold), read from pg_stat_activity and serialized to JSON
 * into the Doris column {@code longTransactionJson}.
 */
@Data
public class LongTransaction {
    // Transaction identifier (populated from pg_stat_activity.pid)
    private String transactionId;
    // How long the transaction has been open, in seconds
    private long durationSeconds;
    // SQL statement the transaction is executing
    private String sql;
    // Transaction state (pg_stat_activity.state)
    private String state;
}

6.Doris建表:创建用于存放指标结果的日志表

-- Doris log table for the PostgreSQL/CDC monitor: one row per sampling cycle
-- per database, keyed by a random UUID (unique-key model).
-- NOTE: column `longTransactionJson` is intentionally camelCase to match the
-- INSERT statement in PgCdcUltimateMonitor.
CREATE TABLE t_pg_log (
    `id`                    varchar(200) COMMENT '日志唯一标识',
    `database_name`         varchar(150) COMMENT '数据库名称',
    `checkpoints_timed`     BIGINT COMMENT '定时检查点次数',
    `checkpoints_req`       BIGINT COMMENT '请求触发检查点次数',
    `wal_buffers_full`      BIGINT COMMENT 'WAL 缓冲区写满次数',
    `cpu_usage`             DOUBLE COMMENT 'CPU 使用率(%)',
    `memory_usage`          DOUBLE COMMENT '内存使用率(GB)',
    `active_connections`    INT COMMENT '活跃连接数',
    `idle_connections`      INT COMMENT '空闲连接数',
    `locked_connections`    INT COMMENT '被锁连接数',
    `xact_commit`           BIGINT COMMENT '事务提交次数',
    `xact_rollback`         BIGINT COMMENT '事务回滚次数',
    `cache_hit_ratio`       DOUBLE COMMENT '缓存命中率(%)',
    -- fixed: levels actually produced by CdcHealthResult are
    -- HEALTHY/WARNING/ALERT, not the HEALTHY/WARN/CRITICAL the old comment claimed
    `cdc_health_level`      VARCHAR(20) COMMENT 'CDC 健康等级(HEALTHY/WARNING/ALERT)',
    `cdc_health_reason`     VARCHAR(500) COMMENT 'CDC 健康异常原因',
    `cdc_health_suggestion` VARCHAR(500) COMMENT 'CDC 健康优化建议',
    `longTransactionJson`   string COMMENT '长事务json信息',
    `create_time`           datetime DEFAULT CURRENT_TIMESTAMP COMMENT '记录创建时间'
) UNIQUE KEY(`id`)
COMMENT 'PostgreSQL 监控日志表'
DISTRIBUTED BY HASH(`id`)
PROPERTIES (
    "replication_allocation" = "tag.location.default: 1"
);

7.加个logback.xml辅助日志打印

<?xml version="1.0" encoding="UTF-8"?>
<configuration scan="true" scanPeriod="60 seconds">

    <!-- Relative path: a "logs" directory created next to the jar -->
    <property name="LOG_BASE_PATH" value="logs" />
    <property name="LOG_FILE_NAME" value="pg-cdc-monitor" />
    <property name="MAX_HISTORY" value="30" />
    <property name="FILE_ENCODING" value="UTF-8" />

    <!-- Console output (optional; may be disabled for server deployments) -->
    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
            <charset>${FILE_ENCODING}</charset>
        </encoder>
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <level>INFO</level>
        </filter>
    </appender>

    <!-- Main log file: rolled daily and whenever a file reaches 200MB -->
    <appender name="FILE_ROLLING" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_BASE_PATH}/${LOG_FILE_NAME}.log</file>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_BASE_PATH}/${LOG_FILE_NAME}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <maxHistory>${MAX_HISTORY}</maxHistory>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>200MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <encoder>
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
            <charset>${FILE_ENCODING}</charset>
        </encoder>
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <level>INFO</level>
        </filter>
    </appender>

    <!-- ERROR-only log file, rolled daily -->
    <appender name="ERROR_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_BASE_PATH}/${LOG_FILE_NAME}-error.log</file>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_BASE_PATH}/${LOG_FILE_NAME}-error.%d{yyyy-MM-dd}.log</fileNamePattern>
            <maxHistory>${MAX_HISTORY}</maxHistory>
        </rollingPolicy>
        <encoder>
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
            <charset>${FILE_ENCODING}</charset>
        </encoder>
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>ERROR</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>

    <root level="INFO">
        <appender-ref ref="FILE_ROLLING" />
        <appender-ref ref="ERROR_FILE" />
        <appender-ref ref="CONSOLE" />
    </root>

    <!-- Quiet down noisy third-party loggers -->
    <logger name="org.postgresql" level="WARN" />
    <logger name="com.mysql.cj" level="WARN" />
    <logger name="java.sql" level="WARN" />
</configuration>

本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:http://www.mzph.cn/news/1174258.shtml

如若内容造成侵权/违法违规/事实不符,请联系多彩编程网进行投诉反馈email:809451989@qq.com,一经查实,立即删除!

相关文章

永磁同步电机参数辨识模型,在线辨识,离线辨识,电参数机械参数均可辨识,基于最小二乘法,滑模观测...

永磁同步电机参数辨识模型,在线辨识,离线辨识,电参数机械参数均可辨识,基于最小二乘法,滑模观测,电压注入,模型参考自适应等 机械参数在线 离线 ,电气参数在线 (三种方法…

Redis技巧:处理大量Key同时过期的5个关键点!

文章目录如果有大量的key需要设置同一时间过期,一般需要注意什么?为什么会有大量Key同时过期?为什么大量Key同时过期会有问题?1. 内存回收压力大2. 阻塞操作3. 磁盘压力如何避免或缓解这些问题?1. 分散过期时间2. 使用…

深度测评专科生必备8款AI论文工具:开题报告文献综述全攻略

深度测评专科生必备8款AI论文工具:开题报告文献综述全攻略 2026年专科生论文写作工具深度测评:选对工具事半功倍 随着AI技术的不断进步,越来越多的专科生开始借助智能工具提升论文写作效率。然而,面对市场上五花八门的AI论文工具…

2026年如何挑选国内新型高铁实训室优质厂商?诚信的实训室批发厂家聚焦优质品牌综合实力排行 - 品牌推荐师

随着职业教育与轨道交通行业的深度融合,高铁实训室作为培养专业技能人才的核心场景,其建设质量直接影响教学成果与行业人才供给效率。然而,当前市场厂商众多,技术路线、产品定位、服务能力差异显著,采购方在选型时…

基于深度学习yolov8的智能车牌识别系统设计(设计源文件+万字报告+讲解)(支持资料、图片参考_相关定制)_文章底部可以扫码

基于深度学习yolov8的智能车牌识别系统设计(设计源文件万字报告讲解)(支持资料、图片参考_相关定制)_文章底部可以扫码 如今智能交通系统中的车牌识别技术被广泛使用,在交通管制、监控安防、智能泊车等方面都有着良好的应用前景。但是传统车…

安徽合肥GEO公司找哪家 - 野榜数据排行

三十六行GEO:中国领先的区域深耕型本地服务营销伙伴 我们是谁? 三十六行GEO 是原三十六行网络科技公司的战略升级品牌,从新媒体传媒公司转型为 专注于中国各城市与区域的本地服务GEO解决方案专家。我们秉承“三十六…

免费网站进阶!——InfinityFree创建数据库教程

💖InfinityFree 简介 InfinityFree是一个提供免费虚拟主机服务的平台。每个账户可创建3个站点,支持自定义域名(需使用其提供的二级域名) 1 ⭐创建网站详见另一篇博客: /* by 01130.hk - online tools website : 01130.…

【脑源定位】基于非负块稀疏贝叶斯学习算法脑电脑源定位附Matlab代码

✅作者简介:热爱科研的Matlab仿真开发者,擅长数据处理、建模仿真、程序设计、完整代码获取、论文复现及科研仿真。 🍎 往期回顾关注个人主页:Matlab科研工作室 👇 关注我领取海量matlab电子书和数学建模资料 …

2026福建草本塑魔仪抗衰套装公司权威推荐榜单:美航著妍草本年轻态团购 /美航草本年轻态加盟 /草本年轻态面膜美航/草本年轻态产品/ 美航草本年轻态企业精选 - 品牌推荐官

一项将草本活性成分提取率提升300%的超临界CO₂萃取技术,正在重新定义草本抗衰的科技标准。 随着健康消费理念的升级,融合了传统草本智慧与现代生物科技的“草本年轻态”抗衰市场,正成为大健康与美妆领域增长最快的…

2026年组装/厂区/工厂/花园/铁艺锌钢护栏实力厂家推荐:精选源头企业,适配工业、建筑、市政全场景 - 品牌推荐官

面对市政道路、工业园区、住宅小区、景区桥梁等多元化的应用场景,护栏的需求早已超出简单的隔离功能,向着安全性、耐久性、美观性和环境协调性等综合维度发展。01 市场格局与行业标准当前,护栏行业正经历从传统防护…

【数据分析】基于 RANSAC算法的鲁棒直线拟合实现附matlab代码

✅作者简介:热爱科研的Matlab仿真开发者,擅长数据处理、建模仿真、程序设计、完整代码获取、论文复现及科研仿真。 🍎 往期回顾关注个人主页:Matlab科研工作室 👇 关注我领取海量matlab电子书和数学建模资料 …

医疗边缘用Rust部署稳推理

📝 博客主页:jaxzheng的CSDN主页 医疗边缘计算的稳定之锚:Rust语言在AI推理部署中的革命性应用目录医疗边缘计算的稳定之锚:Rust语言在AI推理部署中的革命性应用 引言:边缘医疗的稳定性危机 一、应用场景与价值…

2025年上海组织效能人才管理咨询企业权威推荐榜单:跨境管理咨询 /数字化转型管理咨询 /组织效能人才管理咨询/ 管理咨询公司避坑/ 供应链管理咨询伙伴精选 - 品牌推荐官

在当今充满挑战的商业环境中,企业间的竞争日益聚焦于组织的内在效能与人才的核心竞争力。根据相关行业观察,超过70%的企业领导者认为,提升组织效能是应对市场不确定性、实现可持续发展的关键。与此同时,将人力资源…

2026年杭州滨江集团房源渠道权威解析:好房首选 /捡漏好房 /买房首选 /性价比房源/ 2026年新房平台甄选 - 品牌推荐官

在杭州,一套位于钱江新城二期核心区的精装大宅,其预售阶段的标准销售周期大约为3-6个月。对于购房者而言,如何在众多信息中快速、准确地锁定心仪的房源并建立联系渠道,是购房旅程中至关重要的第一步。 01 市场观察…

漂亮网站的prompt提示词 比如大气 扁平化等 - ukyo-

这里为你整理了一份描述网站风格的实用提示词集合,分为风格类型、视觉感受、交互体验和实用组合句式,方便你快速生成理想的网站设计描述: 一、视觉风格类型 基础风格: • 扁平化设计(Flat Design) • 极简主义(…

2026年真空干燥机行业全景报告与制造厂家推荐榜:立式/卧式/四轴螺旋/夹套螺旋/空心螺旋/连续高效真空干燥设备选型指南 - 品牌推荐官

在真空状态下,液态水的沸点可降至室温,这正是现代工业对温度敏感物料进行高效、节能干燥处理的技术基石。干燥作为工业生产中的关键单元操作,是物料保质、增效的核心环节。随着工艺精细化需求的提升,传统的常压干燥…

2026年专业的淮安整装,淮安系统门窗,淮安全包公司选型推荐手册 - 品牌鉴赏师

引言在建筑装饰装修行业蓬勃发展的当下,为了给淮安地区有整装、系统门窗及全包服务需求的消费者提供专业、客观、公正的选型参考,我们依据国内相关行业协会公开的数据及权威白皮书内容,编制了这份 2026 年专业的淮安…

Eunomia:分层卫星网络中多控制器域划分框架 - 指南

pre { white-space: pre !important; word-wrap: normal !important; overflow-x: auto !important; display: block !important; font-family: "Consolas", "Monaco", "Courier New", …

闪测仪厂家综合实力TOP5榜单:技术与市场的深度洞察 - 工业仪器权威说

闪测仪厂家综合实力TOP5榜单:技术与市场的深度洞察 在精密制造行业的舞台上,闪测仪宛如一颗璀璨的明星,作为高效且高精度的检测设备,其性能的优劣直接关乎产品质量的高低与生产效率的快慢。为了助力企业精准挑选出…

【MongoDB实战】5.1 聚合管道基础:理解阶段(Stage)概念 - 实践

pre { white-space: pre !important; word-wrap: normal !important; overflow-x: auto !important; display: block !important; font-family: "Consolas", "Monaco", "Courier New", …