package telegram

import (
	"context"
	"time"

	"github.com/cenkalti/backoff/v4"
	"github.com/go-faster/errors"
	"go.uber.org/multierr"
	"go.uber.org/zap"

	"github.com/gotd/td/exchange"
	"github.com/gotd/td/tdsync"
	"github.com/gotd/td/telegram/auth"
)

func ( *Client) ( context.Context) error {
	 := tdsync.NewCancellableGroup()
	.Go(.conn.Run)

	// If we don't need updates, so there is no reason to subscribe for it.
	if !.noUpdatesMode {
		.Go(func( context.Context) error {
			// Call method which requires authorization, to subscribe for updates.
			// See https://core.telegram.org/api/updates#subscribing-to-updates.
			,  := .Self()
			if  != nil {
				// Ignore unauthorized errors.
				if !auth.IsUnauthorized() {
					.log.Warn("Got error on self", zap.Error())
				}
				return nil
			}

			.log.Info("Got self", zap.String("username", .Username))
			return nil
		})
	}

	.Go(func( context.Context) error {
		select {
		case <-.Done():
			return .Err()
		case <-.restart:
			.log.Debug("Restart triggered")
			// Should call cancel() to cancel group.
			.Cancel()

			return nil
		}
	})

	return .Wait()
}

func ( *Client) ( error) bool {
	return errors.Is(, exchange.ErrKeyFingerprintNotFound)
}

func ( *Client) ( context.Context) error {
	// Note that we currently have no timeout on connection, so this is
	// potentially eternal.
	 := tdsync.SyncBackoff(backoff.WithContext(.connBackoff(), ))

	return backoff.RetryNotify(func() error {
		if  := .runUntilRestart();  != nil {
			if .isPermanentError() {
				return backoff.Permanent()
			}
			return 
		}

		return nil
	}, , func( error,  time.Duration) {
		.log.Info("Restarting connection", zap.Error(), zap.Duration("backoff", ))

		.connMux.Lock()
		.conn = .createPrimaryConn(nil)
		.connMux.Unlock()
	})
}

func ( *Client) () {
	.log.Debug("Ready")
	.ready.Signal()
}

func ( *Client) () {
	.ready.Reset()
}

// Run starts client session and blocks until connection close.
// The f callback is called on successful session initialization and Run
// will return on f() result.
//
// Context of callback will be canceled if fatal error is detected.
// The ctx is used for background operations like updates handling or pools.
//
// See `examples/bg-run` and `contrib/gb` package for classic approach without
// explicit callback, with Connect and defer close().
func ( *Client) ( context.Context,  func( context.Context) error) ( error) {
	if .ctx != nil {
		select {
		case <-.ctx.Done():
			return errors.Wrap(.ctx.Err(), "client already closed")
		default:
		}
	}

	// Setting up client context for background operations like updates
	// handling or pool creation.
	.ctx, .cancel = context.WithCancel()

	.log.Info("Starting")
	defer .log.Info("Closed")
	// Cancel client on exit.
	defer .cancel()
	defer func() {
		.subConnsMux.Lock()
		defer .subConnsMux.Unlock()

		for ,  := range .subConns {
			if  := .Close(); !errors.Is(, context.Canceled) {
				multierr.AppendInto(&, )
			}
		}
	}()

	.resetReady()
	if  := .restoreConnection();  != nil {
		return 
	}

	 := tdsync.NewCancellableGroup()
	.Go(.reconnectUntilClosed)
	.Go(func( context.Context) error {
		select {
		case <-.Done():
			.cancel()
			return .Err()
		case <-.ctx.Done():
			return .ctx.Err()
		}
	})
	.Go(func( context.Context) error {
		select {
		case <-.Done():
			return .Err()
		case <-.ready.Ready():
			if  := ();  != nil {
				return errors.Wrap(, "callback")
			}
			// Should call cancel() to cancel ctx.
			// This will terminate c.conn.Run().
			.log.Debug("Callback returned, stopping")
			.Cancel()
			return nil
		}
	})
	if  := .Wait(); !errors.Is(, context.Canceled) {
		return 
	}

	return nil
}