// pd/migrate.rs

1//! Logic for handling chain upgrades.
2//!
3//! When consensus-breaking changes are made to the Penumbra software,
4//! node operators must coordinate to perform a chain upgrade.
5//! This module declares how local `pd` state should be altered, if at all,
6//! in order to be compatible with the network post-chain-upgrade.
7mod mainnet1;
8mod mainnet2;
9mod mainnet3;
10mod mainnet4;
11mod migrate2;
12mod reset_halt_bit;
13mod simple;
14mod testnet72;
15mod testnet74;
16mod testnet76;
17mod testnet77;
18mod testnet78;
19
20use anyhow::{ensure, Context};
21use penumbra_sdk_governance::StateReadExt;
22use penumbra_sdk_sct::component::clock::EpochRead;
23use std::path::{Path, PathBuf};
24use tracing::instrument;
25
26use migrate2::Migration as MigrationTrait;
27
28use cnidarium::Storage;
29use penumbra_sdk_app::SUBSTORE_PREFIXES;
30
31use flate2::write::GzEncoder;
32use flate2::Compression;
33use std::fs::File;
34
/// The kind of migration that should be performed.
#[derive(Debug)]
pub enum Migration {
    /// Set the chain's halt bit to `false`.
    ReadyToStart,
    /// A simple migration: adds a key to the consensus state.
    /// This is useful for testing upgrade mechanisms, including in production.
    SimpleMigration,
    /// Testnet-72 migration:
    /// - Migrate `BatchSwapOutputData` to new protobuf, replacing epoch height with index.
    Testnet72,
    /// Testnet-74 migration:
    /// - Update the base liquidity index to order routable pairs by descending liquidity
    /// - Update arb executions to include the amount of filled input in the output
    /// - Add `AuctionParameters` to the consensus state
    Testnet74,
    /// Testnet-76 migration:
    /// - Heal the auction component's VCB tally.
    /// - Update FMD parameters to new protobuf structure.
    Testnet76,
    /// Testnet-77 migration:
    /// - Reset the halt bit
    Testnet77,
    /// Testnet-78 migration:
    /// - Truncate various user-supplied `String` fields to a maximum length.
    /// - Populate the DEX NV price indexes with position data
    Testnet78,
    /// Mainnet-1 migration:
    /// - Restore IBC packet commitments for improperly handled withdrawal attempts
    Mainnet1,
    /// Mainnet-2 migration:
    /// - no-op
    Mainnet2,
    /// Mainnet-3 migration:
    /// - no-op
    Mainnet3,
    /// Mainnet-4 migration:
    /// - no-op
    ///
    /// Intended to support code upgrades for Liquidity Tournament support.
    Mainnet4,
    /// Mainnet-5 migration:
    /// - no-op
    ///
    /// Uses the new migration framework.
    Mainnet5,
    /// IBC client recovery
    /// - Swap IBC client state
    IbcClientRecovery,
    /// No-op migration
    /// - Resets halt bit and produces new genesis without state changes
    NoOp,
}
88
impl Migration {
    /// Run this migration with no recipe-specific parameters.
    ///
    /// Convenience wrapper around [`Migration::migrate_with_params`] that passes
    /// an empty parameter list.
    #[instrument(skip(pd_home, genesis_start, force))]
    pub async fn migrate(
        &self,
        pd_home: PathBuf,
        comet_home: Option<PathBuf>,
        genesis_start: Option<tendermint::time::Time>,
        force: bool,
    ) -> anyhow::Result<()> {
        self.migrate_with_params(pd_home, comet_home, genesis_start, force, vec![])
            .await
    }

    /// Run this migration against local `pd` state.
    ///
    /// * `pd_home` — the `pd` home directory; its `rocksdb/` subdirectory is opened.
    /// * `comet_home` — if set, CometBFT state is migrated afterward (legacy recipes only;
    ///   new-framework recipes handle this themselves).
    /// * `genesis_start` — optional genesis start time forwarded to the recipe.
    /// * `force` — bypass the chain-halt and state-version sanity checks.
    /// * `params` — recipe-specific positional string parameters
    ///   (e.g. client IDs for IBC client recovery).
    ///
    /// # Errors
    /// Fails if the halt bit is unset (and `force` is false), if the local state
    /// version disagrees with the block height, or if the recipe itself fails.
    #[instrument(skip(pd_home, genesis_start, force, params))]
    pub async fn migrate_with_params(
        &self,
        pd_home: PathBuf,
        comet_home: Option<PathBuf>,
        genesis_start: Option<tendermint::time::Time>,
        force: bool,
        params: Vec<String>,
    ) -> anyhow::Result<()> {
        tracing::debug!(
            ?pd_home,
            ?genesis_start,
            ?force,
            "preparing to run migration!"
        );
        let rocksdb_dir = pd_home.join("rocksdb");
        let storage = Storage::load(rocksdb_dir, SUBSTORE_PREFIXES.to_vec()).await?;
        ensure!(
            storage.latest_snapshot().is_chain_halted().await || force,
            "to run a migration, the chain halt bit must be set to `true` or use the `--force` cli flag"
        );

        // Assert that the local chain state version is not corrupted, see `v0.80.10` release notes.
        let latest_version = storage.latest_version();
        let block_height = storage.latest_snapshot().get_block_height().await?;
        ensure!(
            latest_version == block_height || force,
            "local chain state version is corrupted: {} != {}",
            latest_version,
            block_height
        );

        tracing::info!("started migration");

        // Some arms below return early, skipping the comet data migration at the bottom:
        // - recipes using the new migration framework (as opposed to legacy migrations),
        //   which handle genesis generation themselves;
        // - the ready-to-start recipe, which does not produce a new genesis.

        match self {
            Migration::SimpleMigration => {
                simple::migrate(storage, pd_home.clone(), genesis_start).await?
            }
            Migration::Mainnet1 => {
                mainnet1::migrate(storage, pd_home.clone(), genesis_start).await?;
            }
            Migration::Mainnet2 => {
                mainnet2::migrate(storage, pd_home.clone(), genesis_start).await?;
            }
            Migration::Mainnet3 => {
                mainnet3::migrate(storage, pd_home.clone(), genesis_start).await?;
            }
            Migration::Mainnet4 => {
                mainnet4::migrate(storage, pd_home.clone(), genesis_start).await?;
            }
            Migration::ReadyToStart => {
                reset_halt_bit::migrate(storage, pd_home, genesis_start).await?;
                // Early return since we are not producing a new genesis.
                return Ok(());
            }
            Migration::IbcClientRecovery => {
                // The new-framework recipe re-opens storage itself; release our handle first.
                storage.release().await;
                ensure!(
                    params.len() >= 2,
                    "IBC client recovery requires at least old and new client IDs"
                );
                // All validation is done inside of the migration recipe.
                let old_client_id = params[0].clone();
                let new_client_id = params[1].clone();

                // Parse optional app_version from third parameter
                let app_version = if params.len() >= 3 && !params[2].is_empty() {
                    Some(
                        params[2]
                            .parse::<u64>()
                            .context("app_version must be a valid u64")?,
                    )
                } else {
                    None
                };

                let migration = migrate2::ibc_client_recovery::IbcClientRecoveryMigration::new(
                    old_client_id,
                    new_client_id,
                    app_version,
                );
                migration
                    .run(pd_home.clone(), comet_home.clone(), genesis_start)
                    .await?;
                // Early return since the new framework handles genesis generation.
                return Ok(());
            }
            Migration::NoOp => {
                // The new-framework recipe re-opens storage itself; release our handle first.
                storage.release().await;

                // Parse optional app_version from first parameter
                let app_version = if !params.is_empty() && !params[0].is_empty() {
                    Some(
                        params[0]
                            .parse::<u64>()
                            .context("app_version must be a valid u64")?,
                    )
                } else {
                    None
                };

                let migration = migrate2::noop::NoOpMigration::new(app_version);
                migration
                    .run(pd_home.clone(), comet_home.clone(), genesis_start)
                    .await?;
                // Early return since the new framework handles genesis generation.
                return Ok(());
            }
            // We keep historical migrations around for now, this will help inform an abstracted
            // design. Feel free to remove it if it's causing you trouble.
            _ => unimplemented!("the specified migration is unimplemented"),
        }

        // Legacy recipes fall through here: sync the freshly written genesis into
        // the CometBFT home, if one was provided.
        if let Some(comet_home) = comet_home {
            let genesis_path = pd_home.join("genesis.json");
            migrate_comet_data(comet_home, genesis_path).await?;
        }

        Ok(())
    }
}
227
228/// Compress single directory to gzipped tar archive. Accepts an Option for naming
229/// the subdir within the tar archive, which defaults to ".", meaning no nesting.
230pub fn archive_directory(
231    src_directory: PathBuf,
232    archive_filepath: PathBuf,
233    subdir_within_archive: Option<String>,
234) -> anyhow::Result<()> {
235    // Don't clobber an existing target archive.
236    if archive_filepath.exists() {
237        tracing::error!(
238            "export archive filepath already exists: {}",
239            archive_filepath.display()
240        );
241        anyhow::bail!("refusing to overwrite existing archive");
242    }
243
244    tracing::info!(
245        "creating archive {} -> {}",
246        src_directory.display(),
247        archive_filepath.display()
248    );
249    let tarball_file = File::create(&archive_filepath)
250        .context("failed to create file for archive: check parent directory and permissions")?;
251    let enc = GzEncoder::new(tarball_file, Compression::default());
252    let mut tarball = tar::Builder::new(enc);
253    let subdir_within_archive = subdir_within_archive.unwrap_or(String::from("."));
254    tarball
255        .append_dir_all(subdir_within_archive, src_directory.as_path())
256        .context("failed to package archive contents")?;
257    Ok(())
258}
259
260/// Read the last block timestamp from the pd state.
261pub async fn last_block_timestamp(home: PathBuf) -> anyhow::Result<tendermint::Time> {
262    let rocksdb = home.join("rocksdb");
263    let storage = Storage::load(rocksdb, SUBSTORE_PREFIXES.to_vec())
264        .await
265        .context("error loading store for timestamp")?;
266    let state = storage.latest_snapshot();
267    let last_block_time = state
268        .get_current_block_timestamp()
269        .await
270        .context("error reading latest block timestamp")?;
271    storage.release().await;
272    Ok(last_block_time)
273}
274
275#[instrument(skip_all)]
276pub async fn migrate_comet_data(
277    comet_home: PathBuf,
278    new_genesis_file: PathBuf,
279) -> anyhow::Result<()> {
280    tracing::info!(?comet_home, ?new_genesis_file, "migrating comet data");
281
282    // Read the contents of new_genesis_file into a serde_json::Value and pull out .initial_height
283    let genesis_contents =
284        std::fs::read_to_string(new_genesis_file).context("error reading new genesis file")?;
285    let genesis_json: serde_json::Value =
286        serde_json::from_str(&genesis_contents).context("error parsing new genesis file")?;
287    tracing::info!(?genesis_json, "parsed genesis file");
288    let initial_height = genesis_json["initial_height"]
289        .as_str()
290        .context("error reading initial_height from genesis file")?
291        .parse::<u64>()?;
292
293    // Write the genesis data to HOME/config/genesis.json
294    let genesis_file = comet_home.join("config").join("genesis.json");
295    tracing::info!(?genesis_file, "writing genesis file to comet config");
296    std::fs::write(genesis_file, genesis_contents)
297        .context("error writing genesis file to comet config")?;
298
299    // Adjust the high-water mark in priv_validator_state.json but don't decrease it
300    adjust_priv_validator_state(&comet_home, initial_height)?;
301
302    // Delete other cometbft data.
303    clear_comet_data(&comet_home)?;
304
305    Ok(())
306}
307
308#[instrument(skip_all)]
309fn adjust_priv_validator_state(comet_home: &Path, initial_height: u64) -> anyhow::Result<()> {
310    let priv_validator_state = comet_home.join("data").join("priv_validator_state.json");
311    let current_state: serde_json::Value =
312        serde_json::from_str(&std::fs::read_to_string(&priv_validator_state)?)?;
313
314    let current_height = current_state["height"]
315        .as_str()
316        .context("error reading height from priv_validator_state.json")?
317        .parse::<u64>()?;
318    if current_height < initial_height {
319        tracing::info!(
320            "increasing height in priv_validator_state from {} to {}",
321            current_height,
322            initial_height
323        );
324        let new_state = serde_json::json!({
325            "height": initial_height.to_string(), // Important to use to_string here as if protojson
326            "round": 0,
327            "step": 0,
328        });
329        tracing::info!(?new_state, "updated priv_validator_state.json");
330        std::fs::write(
331            &priv_validator_state,
332            &serde_json::to_string_pretty(&new_state)?,
333        )?;
334    } else {
335        anyhow::bail!(
336            "priv_validator_state height {} is already greater than or equal to initial_height {}",
337            current_height,
338            initial_height
339        );
340    }
341
342    Ok(())
343}
344
345#[instrument(skip_all)]
346fn clear_comet_data(comet_home: &Path) -> anyhow::Result<()> {
347    let data_dir = comet_home.join("data");
348
349    /*
350    N.B. We want to preserve the `tx_index.db` directory.
351    Doing so will allow CometBFT to reference historical transactions behind the upgrade boundary.
352     */
353    for subdir in &["evidence.db", "state.db", "blockstore.db", "cs.wal"] {
354        let path = data_dir.join(subdir);
355        if path.exists() {
356            tracing::info!(?path, "removing file");
357            std::fs::remove_dir_all(path)?;
358        }
359    }
360
361    Ok(())
362}