pd/
migrate.rs

1//! Logic for handling chain upgrades.
2//!
3//! When consensus-breaking changes are made to the Penumbra software,
4//! node operators must coordinate to perform a chain upgrade.
5//! This module declares how local `pd` state should be altered, if at all,
6//! in order to be compatible with the network post-chain-upgrade.
7mod mainnet1;
8mod mainnet2;
9mod mainnet3;
10mod mainnet4;
11mod reset_halt_bit;
12mod simple;
13mod testnet72;
14mod testnet74;
15mod testnet76;
16mod testnet77;
17mod testnet78;
18
19use anyhow::{ensure, Context};
20use penumbra_sdk_governance::StateReadExt;
21use penumbra_sdk_sct::component::clock::EpochRead;
22use std::path::{Path, PathBuf};
23use tracing::instrument;
24
25use cnidarium::Storage;
26use penumbra_sdk_app::SUBSTORE_PREFIXES;
27
28use flate2::write::GzEncoder;
29use flate2::Compression;
30use std::fs::File;
31
/// The kind of migration that should be performed.
#[derive(Debug)]
pub enum Migration {
    /// Set the chain's halt bit to `false`.
    ReadyToStart,
    /// A simple migration: adds a key to the consensus state.
    /// This is useful for testing upgrade mechanisms, including in production.
    SimpleMigration,
    /// Testnet-72 migration:
    /// - Migrate `BatchSwapOutputData` to new protobuf, replacing epoch height with index.
    Testnet72,
    /// Testnet-74 migration:
    /// - Update the base liquidity index to order routable pairs by descending liquidity
    /// - Update arb executions to include the amount of filled input in the output
    /// - Add `AuctionParameters` to the consensus state
    Testnet74,
    /// Testnet-76 migration:
    /// - Heal the auction component's VCB tally.
    /// - Update FMD parameters to new protobuf structure.
    Testnet76,
    /// Testnet-77 migration:
    /// - Reset the halt bit
    Testnet77,
    /// Testnet-78 migration:
    /// - Truncate various user-supplied `String` fields to a maximum length.
    /// - Populate the DEX NV price indexes with position data
    Testnet78,
    /// Mainnet-1 migration:
    /// - Restore IBC packet commitments for improperly handled withdrawal attempts
    Mainnet1,
    /// Mainnet-2 migration:
    /// - no-op
    Mainnet2,
    /// Mainnet-3 migration:
    /// - no-op
    Mainnet3,
    /// Mainnet-4 migration:
    /// - no-op
    ///
    /// Intended to support code upgrades for Liquidity Tournament support.
    Mainnet4,
}
74
impl Migration {
    /// Run this migration against the local `pd` state rooted at `pd_home`.
    ///
    /// Preconditions, each bypassable with `force`:
    /// - the chain halt bit in the latest snapshot must be set to `true`;
    /// - the local storage version must equal the latest block height
    ///   (guards against the state corruption described in the `v0.80.10`
    ///   release notes).
    ///
    /// `Migration::ReadyToStart` only resets the halt bit and returns early;
    /// every other supported variant dispatches to its dedicated migration
    /// module. Historical testnet variants are intentionally unimplemented.
    ///
    /// If `comet_home` is provided, the CometBFT state is migrated afterwards
    /// using the freshly written `pd_home/genesis.json`.
    #[instrument(skip(pd_home, genesis_start, force))]
    pub async fn migrate(
        &self,
        pd_home: PathBuf,
        comet_home: Option<PathBuf>,
        genesis_start: Option<tendermint::time::Time>,
        force: bool,
    ) -> anyhow::Result<()> {
        tracing::debug!(
            ?pd_home,
            ?genesis_start,
            ?force,
            "preparing to run migration!"
        );
        let rocksdb_dir = pd_home.join("rocksdb");
        let storage = Storage::load(rocksdb_dir, SUBSTORE_PREFIXES.to_vec()).await?;
        ensure!(
            storage.latest_snapshot().is_chain_halted().await || force,
            "to run a migration, the chain halt bit must be set to `true` or use the `--force` cli flag"
        );

        // Assert that the local chain state version is not corrupted, see `v0.80.10` release notes.
        let latest_version = storage.latest_version();
        let block_height = storage.latest_snapshot().get_block_height().await?;
        ensure!(
            latest_version == block_height || force,
            "local chain state version is corrupted: {} != {}",
            latest_version,
            block_height
        );

        tracing::info!("started migration");

        // If this is `ReadyToStart`, we need to reset the halt bit and return early.
        // Note: `storage` is moved here, which is fine because this branch returns;
        // the match below is only reached when `storage` is still owned.
        if let Migration::ReadyToStart = self {
            reset_halt_bit::migrate(storage, pd_home, genesis_start).await?;
            return Ok(());
        }

        // Each arm consumes `storage`; exactly one arm runs.
        match self {
            Migration::SimpleMigration => {
                simple::migrate(storage, pd_home.clone(), genesis_start).await?
            }
            Migration::Mainnet1 => {
                mainnet1::migrate(storage, pd_home.clone(), genesis_start).await?;
            }
            Migration::Mainnet2 => {
                mainnet2::migrate(storage, pd_home.clone(), genesis_start).await?;
            }
            Migration::Mainnet3 => {
                mainnet3::migrate(storage, pd_home.clone(), genesis_start).await?;
            }
            Migration::Mainnet4 => {
                mainnet4::migrate(storage, pd_home.clone(), genesis_start).await?;
            }
            // We keep historical migrations around for now, this will help inform an abstracted
            // design. Feel free to remove it if it's causing you trouble.
            _ => unimplemented!("the specified migration is unimplemented"),
        }

        // The per-network migrations write a fresh genesis to `pd_home`;
        // mirror it into the CometBFT home if one was supplied.
        if let Some(comet_home) = comet_home {
            let genesis_path = pd_home.join("genesis.json");
            migrate_comet_data(comet_home, genesis_path).await?;
        }

        Ok(())
    }
}
144
145/// Compress single directory to gzipped tar archive. Accepts an Option for naming
146/// the subdir within the tar archive, which defaults to ".", meaning no nesting.
147pub fn archive_directory(
148    src_directory: PathBuf,
149    archive_filepath: PathBuf,
150    subdir_within_archive: Option<String>,
151) -> anyhow::Result<()> {
152    // Don't clobber an existing target archive.
153    if archive_filepath.exists() {
154        tracing::error!(
155            "export archive filepath already exists: {}",
156            archive_filepath.display()
157        );
158        anyhow::bail!("refusing to overwrite existing archive");
159    }
160
161    tracing::info!(
162        "creating archive {} -> {}",
163        src_directory.display(),
164        archive_filepath.display()
165    );
166    let tarball_file = File::create(&archive_filepath)
167        .context("failed to create file for archive: check parent directory and permissions")?;
168    let enc = GzEncoder::new(tarball_file, Compression::default());
169    let mut tarball = tar::Builder::new(enc);
170    let subdir_within_archive = subdir_within_archive.unwrap_or(String::from("."));
171    tarball
172        .append_dir_all(subdir_within_archive, src_directory.as_path())
173        .context("failed to package archive contents")?;
174    Ok(())
175}
176
177/// Read the last block timestamp from the pd state.
178pub async fn last_block_timestamp(home: PathBuf) -> anyhow::Result<tendermint::Time> {
179    let rocksdb = home.join("rocksdb");
180    let storage = Storage::load(rocksdb, SUBSTORE_PREFIXES.to_vec())
181        .await
182        .context("error loading store for timestamp")?;
183    let state = storage.latest_snapshot();
184    let last_block_time = state
185        .get_current_block_timestamp()
186        .await
187        .context("error reading latest block timestamp")?;
188    storage.release().await;
189    Ok(last_block_time)
190}
191
192#[instrument(skip_all)]
193pub async fn migrate_comet_data(
194    comet_home: PathBuf,
195    new_genesis_file: PathBuf,
196) -> anyhow::Result<()> {
197    tracing::info!(?comet_home, ?new_genesis_file, "migrating comet data");
198
199    // Read the contents of new_genesis_file into a serde_json::Value and pull out .initial_height
200    let genesis_contents =
201        std::fs::read_to_string(new_genesis_file).context("error reading new genesis file")?;
202    let genesis_json: serde_json::Value =
203        serde_json::from_str(&genesis_contents).context("error parsing new genesis file")?;
204    tracing::info!(?genesis_json, "parsed genesis file");
205    let initial_height = genesis_json["initial_height"]
206        .as_str()
207        .context("error reading initial_height from genesis file")?
208        .parse::<u64>()?;
209
210    // Write the genesis data to HOME/config/genesis.json
211    let genesis_file = comet_home.join("config").join("genesis.json");
212    tracing::info!(?genesis_file, "writing genesis file to comet config");
213    std::fs::write(genesis_file, genesis_contents)
214        .context("error writing genesis file to comet config")?;
215
216    // Adjust the high-water mark in priv_validator_state.json but don't decrease it
217    adjust_priv_validator_state(&comet_home, initial_height)?;
218
219    // Delete other cometbft data.
220    clear_comet_data(&comet_home)?;
221
222    Ok(())
223}
224
225#[instrument(skip_all)]
226fn adjust_priv_validator_state(comet_home: &Path, initial_height: u64) -> anyhow::Result<()> {
227    let priv_validator_state = comet_home.join("data").join("priv_validator_state.json");
228    let current_state: serde_json::Value =
229        serde_json::from_str(&std::fs::read_to_string(&priv_validator_state)?)?;
230
231    let current_height = current_state["height"]
232        .as_str()
233        .context("error reading height from priv_validator_state.json")?
234        .parse::<u64>()?;
235    if current_height < initial_height {
236        tracing::info!(
237            "increasing height in priv_validator_state from {} to {}",
238            current_height,
239            initial_height
240        );
241        let new_state = serde_json::json!({
242            "height": initial_height.to_string(), // Important to use to_string here as if protojson
243            "round": 0,
244            "step": 0,
245        });
246        tracing::info!(?new_state, "updated priv_validator_state.json");
247        std::fs::write(
248            &priv_validator_state,
249            &serde_json::to_string_pretty(&new_state)?,
250        )?;
251    } else {
252        anyhow::bail!(
253            "priv_validator_state height {} is already greater than or equal to initial_height {}",
254            current_height,
255            initial_height
256        );
257    }
258
259    Ok(())
260}
261
262#[instrument(skip_all)]
263fn clear_comet_data(comet_home: &Path) -> anyhow::Result<()> {
264    let data_dir = comet_home.join("data");
265
266    /*
267    N.B. We want to preserve the `tx_index.db` directory.
268    Doing so will allow CometBFT to reference historical transactions behind the upgrade boundary.
269     */
270    for subdir in &["evidence.db", "state.db", "blockstore.db", "cs.wal"] {
271        let path = data_dir.join(subdir);
272        if path.exists() {
273            tracing::info!(?path, "removing file");
274            std::fs::remove_dir_all(path)?;
275        }
276    }
277
278    Ok(())
279}